Diffstat (limited to 'drivers/net')
129 files changed, 1383 insertions, 959 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 78dde56ae6e6..d5fe5d5f490f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -82,6 +82,8 @@
 #include <net/bond_3ad.h>
 #include <net/bond_alb.h>
 
+#include "bonding_priv.h"
+
 /*---------------------------- Module parameters ----------------------------*/
 
 /* monitor all links that often (in milliseconds). <=0 disables monitoring */
@@ -4542,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void)
 int bond_create(struct net *net, const char *name)
 {
 struct net_device *bond_dev;
+struct bonding *bond;
+struct alb_bond_info *bond_info;
 int res;
 
 rtnl_lock();
@@ -4555,6 +4559,14 @@ int bond_create(struct net *net, const char *name)
 return -ENOMEM;
 }
 
+/*
+* Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
+* It is set to 0 by default which is wrong.
+*/
+bond = netdev_priv(bond_dev);
+bond_info = &(BOND_ALB_INFO(bond));
+bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
+
 dev_net_set(bond_dev, net);
 bond_dev->rtnl_link_ops = &bond_link_ops;
 
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 4df28943d222..e8d3c1d35453 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
 out:
 if (ret)
 bond_opt_error_interpret(bond, opt, ret, val);
-else
+else if (bond->dev->reg_state == NETREG_REGISTERED)
 call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
 
 return ret;
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 62694cfc05b6..b20b35acb47d 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -4,6 +4,7 @@
 #include <net/netns/generic.h>
 #include <net/bonding.h>
 
+#include "bonding_priv.h"
 
 static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 __acquires(RCU)
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
new file mode 100644
index 000000000000..5a4d81a9437c
--- /dev/null
+++ b/drivers/net/bonding/bonding_priv.h
@@ -0,0 +1,25 @@
+/*
+* Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
+*
+* Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+* NCM: Network and Communications Management, Inc.
+*
+* BUT, I'm the one who modified it for ethernet, so:
+* (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
+*
+* This software may be used and distributed according to the terms
+* of the GNU Public License, incorporated herein by reference.
+*
+*/
+
+#ifndef _BONDING_PRIV_H
+#define _BONDING_PRIV_H
+
+#define DRV_VERSION "3.7.1"
+#define DRV_RELDATE "April 27, 2011"
+#define DRV_NAME "bonding"
+#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
+
+#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+
+#endif
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 58808f651452..e8c96b8e86f4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -112,7 +112,7 @@ config PCH_CAN
 
 config CAN_GRCAN
 tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
-depends on OF
+depends on OF && HAS_DMA
 ---help---
 Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
 Note that the driver supports little endian, even though little
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4643914859b2..8b17a9065b0b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
 
 if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
 MSG_FLAG_NERR)) {
-netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
+netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
 msg->u.rx_can_header.flag);
 
 stats->rx_errors++;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 6bddfe062b51..fc55e8e0351d 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -509,10 +509,11 @@ static int xcan_rx(struct net_device *ndev)
 cf->can_id |= CAN_RTR_FLAG;
 }
 
-if (!(id_xcan & XCAN_IDR_SRR_MASK)) {
+/* DW1/DW2 must always be read to remove message from RXFIFO */
 data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
 data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
 
+if (!(cf->can_id & CAN_RTR_FLAG)) {
 /* Change Xilinx CAN data format to socketCAN data format */
 if (cf->can_dlc > 0)
 *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index af639ab4c55b..cf309aa92802 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1469,6 +1469,9 @@ static void __exit mv88e6xxx_cleanup(void)
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
 unregister_switch_driver(&mv88e6171_switch_driver);
 #endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
+unregister_switch_driver(&mv88e6352_switch_driver);
+#endif
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
 #endif
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index b36ee9e0d220..d686b9cac29f 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec)
 char *s;
 
 if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
-printk(KERN_ERR "%s: unable to read podule description string\n",
+printk(KERN_ERR "%s: unable to read module description string\n",
 dev_name(&ec->dev));
 goto no_addr;
 }
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index eba070f16782..89cd11d86642 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -58,15 +58,12 @@ struct msgdma_extended_desc {
 /* Tx buffer control flags
 */
 #define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
-MSGDMA_DESC_CTL_TR_ERR_IRQ | \
 MSGDMA_DESC_CTL_GO)
 
-#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
-MSGDMA_DESC_CTL_GO)
+#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_GO)
 
 #define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
 MSGDMA_DESC_CTL_TR_COMP_IRQ | \
-MSGDMA_DESC_CTL_TR_ERR_IRQ | \
 MSGDMA_DESC_CTL_GO)
 
 #define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 90a76306ad0f..da48e66377b5 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -391,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 "RCV pktstatus %08X pktlength %08X\n",
 pktstatus, pktlength);
 
+/* DMA trasfer from TSE starts with 2 aditional bytes for
+* IP payload alignment. Status returned by get_rx_status()
+* contains DMA transfer length. Packet is 2 bytes shorter.
+*/
+pktlength -= 2;
+
 count++;
 next_entry = (++priv->rx_cons) % priv->rx_ring_size;
 
@@ -777,6 +783,8 @@ static int init_phy(struct net_device *dev)
 struct altera_tse_private *priv = netdev_priv(dev);
 struct phy_device *phydev;
 struct device_node *phynode;
+bool fixed_link = false;
+int rc = 0;
 
 /* Avoid init phy in case of no phy present */
 if (!priv->phy_iface)
@@ -789,13 +797,32 @@ static int init_phy(struct net_device *dev)
 phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
 
 if (!phynode) {
-netdev_dbg(dev, "no phy-handle found\n");
-if (!priv->mdio) {
-netdev_err(dev,
-"No phy-handle nor local mdio specified\n");
-return -ENODEV;
+/* check if a fixed-link is defined in device-tree */
+if (of_phy_is_fixed_link(priv->device->of_node)) {
+rc = of_phy_register_fixed_link(priv->device->of_node);
+if (rc < 0) {
+netdev_err(dev, "cannot register fixed PHY\n");
+return rc;
+}
+
+/* In the case of a fixed PHY, the DT node associated
+* to the PHY is the Ethernet MAC DT node.
+*/
+phynode = of_node_get(priv->device->of_node);
+fixed_link = true;
+
+netdev_dbg(dev, "fixed-link detected\n");
+phydev = of_phy_connect(dev, phynode,
+&altera_tse_adjust_link,
+0, priv->phy_iface);
+} else {
+netdev_dbg(dev, "no phy-handle found\n");
+if (!priv->mdio) {
+netdev_err(dev, "No phy-handle nor local mdio specified\n");
+return -ENODEV;
+}
+phydev = connect_local_phy(dev);
 }
-phydev = connect_local_phy(dev);
 } else {
 netdev_dbg(dev, "phy-handle found\n");
 phydev = of_phy_connect(dev, phynode,
@@ -819,10 +846,10 @@ static int init_phy(struct net_device *dev)
 /* Broken HW is sometimes missing the pull-up resistor on the
 * MDIO line, which results in reads to non-existent devices returning
 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
-* device as well.
+* device as well. If a fixed-link is used the phy_id is always 0.
 * Note: phydev->phy_id is the result of reading the UID PHY registers.
 */
-if (phydev->phy_id == 0) {
+if ((phydev->phy_id == 0) && !fixed_link) {
 netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
 phy_disconnect(phydev);
 return -ENODEV;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index c638c85f3954..426916036151 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,8 @@ config SUNLANCE
 
 config AMD_XGBE
 tristate "AMD 10GbE Ethernet driver"
-depends on (OF_NET || ACPI) && HAS_IOMEM
+depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
+depends on ARM64 || COMPILE_TEST
 select PHYLIB
 select AMD_XGBE_PHY
 select BITREVERSE
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index db84ddcfec84..9fd6c69a8bac 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data)
 if (napi_schedule_prep(napi)) {
 /* Disable Tx and Rx interrupts */
 if (pdata->per_channel_irq)
-disable_irq(channel->dma_irq);
+disable_irq_nosync(channel->dma_irq);
 else
 xgbe_disable_rx_tx_ints(pdata);
 
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index f4054d242f3c..19e38afbc5ee 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,6 +1,7 @@
 config NET_XGENE
 tristate "APM X-Gene SoC Ethernet Driver"
 depends on HAS_DMA
+depends on ARCH_XGENE || COMPILE_TEST
 select PHYLIB
 help
 This is the Ethernet driver for the on-chip ethernet interface on the
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 8e262e2b39b6..dea29ee24da4 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -25,8 +25,7 @@ config ARC_EMAC_CORE
 config ARC_EMAC
 tristate "ARC EMAC support"
 select ARC_EMAC_CORE
-depends on OF_IRQ
-depends on OF_NET
+depends on OF_IRQ && OF_NET && HAS_DMA
 ---help---
 On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
 non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -35,7 +34,7 @@ config ARC_EMAC
 config EMAC_ROCKCHIP
 tristate "Rockchip EMAC support"
 select ARC_EMAC_CORE
-depends on OF_IRQ && OF_NET && REGULATOR
+depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA
 ---help---
 Support for Rockchip RK3066/RK3188 EMAC ethernet controllers.
 This selects Rockchip SoC glue layer support for the
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
index 74df16aef793..88a6271de5bc 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
@@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
 #define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
 #define TWSI_CTRL_SW_LDSTART 0x800
 #define TWSI_CTRL_HW_LDSTART 0x1000
-#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
 #define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
 #define TWSI_CTRL_LD_EXIST 0x400000
 #define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 77363d680532..a3b1c07ae0af 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2464,6 +2464,7 @@ err_out_powerdown:
 ssb_bus_may_powerdown(sdev->bus);
 
 err_out_free_dev:
+netif_napi_del(&bp->napi);
 free_netdev(dev);
 
 out:
@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev)
 b44_unregister_phy_one(bp);
 ssb_device_disable(sdev, 0);
 ssb_bus_may_powerdown(sdev->bus);
+netif_napi_del(&bp->napi);
 free_netdev(dev);
 ssb_pcihost_set_power_state(sdev, PCI_D3hot);
 ssb_set_drvdata(sdev, NULL);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 7e3d87a88c76..e2c043eabbf3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters {
 u32 jbr; /* RO # of xmited jabber count*/
 u32 bytes; /* RO # of xmited byte count */
 u32 pok; /* RO # of xmited good pkt */
-u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */
+u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
 };
 
 struct bcm_sysport_mib {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index de77d3a74abc..21e3c38c7c75 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1260,7 +1260,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
 
 /* Poll again if more events arrived in the meantime */
 if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
-return handled;
+return weight;
 
 if (handled < weight) {
 napi_complete(napi);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 355d5fea5be9..1f82a04ce01a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -521,6 +521,7 @@ struct bnx2x_fp_txdata {
 };
 
 enum bnx2x_tpa_mode_t {
+TPA_MODE_DISABLED,
 TPA_MODE_LRO,
 TPA_MODE_GRO
 };
@@ -589,7 +590,6 @@ struct bnx2x_fastpath {
 
 /* TPA related */
 struct bnx2x_agg_info *tpa_info;
-u8 disable_tpa;
 #ifdef BNX2X_STOP_ON_ERROR
 u64 tpa_queue_used;
 #endif
@@ -1545,9 +1545,7 @@
 #define USING_MSIX_FLAG (1 << 5)
 #define USING_MSI_FLAG (1 << 6)
 #define DISABLE_MSI_FLAG (1 << 7)
-#define TPA_ENABLE_FLAG (1 << 8)
 #define NO_MCP_FLAG (1 << 9)
-#define GRO_ENABLE_FLAG (1 << 10)
 #define MF_FUNC_DIS (1 << 11)
 #define OWN_CNIC_IRQ (1 << 12)
 #define NO_ISCSI_OOO_FLAG (1 << 13)
@@ -1776,7 +1774,7 @@
 int stats_state;
 
 /* used for synchronization of concurrent threads statistics handling */
-struct mutex stats_lock;
+struct semaphore stats_lock;
 
 /* used by dmae command loader */
 struct dmae_command stats_dmae;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2f63467bce46..ec56a9b65dc3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 u16 frag_size, pages;
 #ifdef BNX2X_STOP_ON_ERROR
 /* sanity check */
-if (fp->disable_tpa &&
+if (fp->mode == TPA_MODE_DISABLED &&
 (CQE_TYPE_START(cqe_fp_type) ||
 CQE_TYPE_STOP(cqe_fp_type)))
-BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
+BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
 CQE_TYPE(cqe_fp_type));
 #endif
 
@@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 DP(NETIF_MSG_IFUP,
 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 
-if (!fp->disable_tpa) {
+if (fp->mode != TPA_MODE_DISABLED) {
 /* Fill the per-aggregation pool */
 for (i = 0; i < MAX_AGG_QS(bp); i++) {
 struct bnx2x_agg_info *tpa_info =
@@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
 j);
 bnx2x_free_tpa_pool(bp, fp, i);
-fp->disable_tpa = 1;
+fp->mode = TPA_MODE_DISABLED;
 break;
 }
 dma_unmap_addr_set(first_buf, mapping, 0);
@@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 ring_prod);
 bnx2x_free_tpa_pool(bp, fp,
 MAX_AGG_QS(bp));
-fp->disable_tpa = 1;
+fp->mode = TPA_MODE_DISABLED;
 ring_prod = 0;
 break;
 }
@@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 
 bnx2x_free_rx_bds(fp);
 
-if (!fp->disable_tpa)
+if (fp->mode != TPA_MODE_DISABLED)
 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
 }
 }
@@ -2477,17 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 /* set the tpa flag for each queue. The tpa flag determines the queue
 * minimal size so it must be set prior to queue memory allocation
 */
-fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
-(bp->flags & GRO_ENABLE_FLAG &&
-bnx2x_mtu_allows_gro(bp->dev->mtu)));
-if (bp->flags & TPA_ENABLE_FLAG)
+if (bp->dev->features & NETIF_F_LRO)
 fp->mode = TPA_MODE_LRO;
-else if (bp->flags & GRO_ENABLE_FLAG)
+else if (bp->dev->features & NETIF_F_GRO &&
+bnx2x_mtu_allows_gro(bp->dev->mtu))
 fp->mode = TPA_MODE_GRO;
+else
+fp->mode = TPA_MODE_DISABLED;
 
-/* We don't want TPA on an FCoE L2 ring */
-if (IS_FCOE_FP(fp))
-fp->disable_tpa = 1;
+/* We don't want TPA if it's disabled in bp
+* or if this is an FCoE L2 ring.
+*/
+if (bp->disable_tpa || IS_FCOE_FP(fp))
+fp->mode = TPA_MODE_DISABLED;
 }
 
 int bnx2x_load_cnic(struct bnx2x *bp)
@@ -2608,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 /*
 * Zero fastpath structures preserving invariants like napi, which are
 * allocated only once, fp index, max_cos, bp pointer.
-* Also set fp->disable_tpa and txdata_ptr.
+* Also set fp->mode and txdata_ptr.
 */
 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
 for_each_queue(bp, i)
@@ -3247,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi)
 
 if ((bp->state == BNX2X_STATE_CLOSED) ||
 (bp->state == BNX2X_STATE_ERROR) ||
-(bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
+(bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
 return LL_FLUSH_FAILED;
 
 if (!bnx2x_fp_lock_poll(fp))
@@ -4543,7 +4545,7 @@ alloc_mem_err:
 * In these cases we disable the queue
 * Min size is different for OOO, TPA and non-TPA queues
 */
-if (ring_size < (fp->disable_tpa ?
+if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
 /* release memory allocated for this queue */
 bnx2x_free_fp_mem_at(bp, index);
@@ -4784,6 +4786,11 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 {
 struct bnx2x *bp = netdev_priv(dev);
 
+if (pci_num_vf(bp->pdev)) {
+DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
+return -EPERM;
+}
+
 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
 return -EAGAIN;
@@ -4809,66 +4816,71 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
 {
 struct bnx2x *bp = netdev_priv(dev);
 
+if (pci_num_vf(bp->pdev)) {
+netdev_features_t changed = dev->features ^ features;
+
+/* Revert the requested changes in features if they
+* would require internal reload of PF in bnx2x_set_features().
+*/
+if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
+features &= ~NETIF_F_RXCSUM;
+features |= dev->features & NETIF_F_RXCSUM;
+}
+
+if (changed & NETIF_F_LOOPBACK) {
+features &= ~NETIF_F_LOOPBACK;
+features |= dev->features & NETIF_F_LOOPBACK;
+}
+}
+
 /* TPA requires Rx CSUM offloading */
 if (!(features & NETIF_F_RXCSUM)) {
 features &= ~NETIF_F_LRO;
 features &= ~NETIF_F_GRO;
 }
 
-/* Note: do not disable SW GRO in kernel when HW GRO is off */
-if (bp->disable_tpa)
-features &= ~NETIF_F_LRO;
-
 return features;
 }
 
 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 {
 struct bnx2x *bp = netdev_priv(dev);
-u32 flags = bp->flags;
-u32 changes;
+netdev_features_t changes = features ^ dev->features;
 bool bnx2x_reload = false;
+int rc;
 
-if (features & NETIF_F_LRO)
-flags |= TPA_ENABLE_FLAG;
-else
-flags &= ~TPA_ENABLE_FLAG;
-
-if (features & NETIF_F_GRO)
-flags |= GRO_ENABLE_FLAG;
-else
-flags &= ~GRO_ENABLE_FLAG;
-
-if (features & NETIF_F_LOOPBACK) {
-if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
-bp->link_params.loopback_mode = LOOPBACK_BMAC;
-bnx2x_reload = true;
-}
-} else {
-if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
-bp->link_params.loopback_mode = LOOPBACK_NONE;
-bnx2x_reload = true;
+/* VFs or non SRIOV PFs should be able to change loopback feature */
+if (!pci_num_vf(bp->pdev)) {
+if (features & NETIF_F_LOOPBACK) {
+if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
+bp->link_params.loopback_mode = LOOPBACK_BMAC;
+bnx2x_reload = true;
+}
+} else {
+if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
+bp->link_params.loopback_mode = LOOPBACK_NONE;
+bnx2x_reload = true;
+}
 }
 }
 
-changes = flags ^ bp->flags;
-
 /* if GRO is changed while LRO is enabled, don't force a reload */
-if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
-changes &= ~GRO_ENABLE_FLAG;
+if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
+changes &= ~NETIF_F_GRO;
 
 /* if GRO is changed while HW TPA is off, don't force a reload */
-if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
-changes &= ~GRO_ENABLE_FLAG;
+if ((changes & NETIF_F_GRO) && bp->disable_tpa)
+changes &= ~NETIF_F_GRO;
 
 if (changes)
 bnx2x_reload = true;
 
-bp->flags = flags;
-
 if (bnx2x_reload) {
-if (bp->recovery_state == BNX2X_RECOVERY_DONE)
-return bnx2x_reload_if_running(dev);
+if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
+dev->features = features;
+rc = bnx2x_reload_if_running(dev);
+return rc ? rc : 1;
+}
 /* else: bnx2x_nic_load() will be called at end of recovery */
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index adcacda7af7b..d7a71758e876 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 {
 int i;
 
-if (fp->disable_tpa)
+if (fp->mode == TPA_MODE_DISABLED)
 return;
 
 for (i = 0; i < last; i++)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index e3d853cab7c9..48ed005ba73f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1843,6 +1843,12 @@ static int bnx2x_set_ringparam(struct net_device *dev,
 "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
 ering->rx_pending, ering->tx_pending);
 
+if (pci_num_vf(bp->pdev)) {
+DP(BNX2X_MSG_IOV,
+"VFs are enabled, can not change ring parameters\n");
+return -EPERM;
+}
+
 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 DP(BNX2X_MSG_ETHTOOL,
 "Handling parity error recovery. Try again later\n");
@@ -2899,6 +2905,12 @@ static void bnx2x_self_test(struct net_device *dev,
 u8 is_serdes, link_up;
 int rc, cnt = 0;
 
+if (pci_num_vf(bp->pdev)) {
+DP(BNX2X_MSG_IOV,
+"VFs are enabled, can not perform self test\n");
+return;
+}
+
 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
 netdev_err(bp->dev,
 "Handling parity error recovery. Try again later\n");
@@ -3468,6 +3480,11 @@ static int bnx2x_set_channels(struct net_device *dev,
 channels->rx_count, channels->tx_count, channels->other_count,
 channels->combined_count);
 
+if (pci_num_vf(bp->pdev)) {
+DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n");
+return -EPERM;
+}
+
 /* We don't support separate rx / tx channels.
 * We don't allow setting 'other' channels.
 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b9f85fccb419..33501bcddc48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
 }
 
-if (!fp->disable_tpa) {
+if (fp->mode != TPA_MODE_DISABLED) {
 __set_bit(BNX2X_Q_FLG_TPA, &flags);
 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
 if (fp->mode == TPA_MODE_GRO)
@@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 u16 sge_sz = 0;
 u16 tpa_agg_size = 0;
 
-if (!fp->disable_tpa) {
+if (fp->mode != TPA_MODE_DISABLED) {
 pause->sge_th_lo = SGE_TH_LO(bp);
 pause->sge_th_hi = SGE_TH_HI(bp);
 
@@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
 /* This flag is relevant for E1x only.
 * E2 doesn't have a TPA configuration in a function level.
 */
-flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
 
 func_init.func_flgs = flags;
 func_init.pf_id = BP_FUNC(bp);
@@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 mutex_init(&bp->port.phy_mutex);
 mutex_init(&bp->fw_mb_mutex);
 mutex_init(&bp->drv_info_mutex);
-mutex_init(&bp->stats_lock);
+sema_init(&bp->stats_lock, 1);
 bp->drv_info_mng_owner = false;
 
 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
@@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
 /* Set TPA flags */
 if (bp->disable_tpa) {
-bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
+bp->dev->hw_features &= ~NETIF_F_LRO;
 bp->dev->features &= ~NETIF_F_LRO;
-} else {
-bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
-bp->dev->features |= NETIF_F_LRO;
 }
 
 if (CHIP_IS_E1(bp))
@@ -13371,6 +13368,17 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 bool is_vf;
 int cnic_cnt;
 
+/* Management FW 'remembers' living interfaces. Allow it some time
+* to forget previously living interfaces, allowing a proper re-load.
+*/
+if (is_kdump_kernel()) {
+ktime_t now = ktime_get_boottime();
+ktime_t fw_ready_time = ktime_set(5, 0);
+
+if (ktime_before(now, fw_ready_time))
+msleep(ktime_ms_delta(fw_ready_time, now));
+}
+
 /* An estimated maximum supported CoS number according to the chip
 * version.
 * We will try to roughly estimate the maximum number of CoSes this chip
@@ -13682,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 cancel_delayed_work_sync(&bp->sp_task);
 cancel_delayed_work_sync(&bp->period_task);
 
-mutex_lock(&bp->stats_lock);
+if (!down_timeout(&bp->stats_lock, HZ / 10)) {
 bp->stats_state = STATS_STATE_DISABLED;
-mutex_unlock(&bp->stats_lock);
+up(&bp->stats_lock);
+}
 
 bnx2x_save_statistics(bp);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 266b055c2360..69d699f0730a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 * that context in case someone is in the middle of a transition.
 * For other events, wait a bit until lock is taken.
 */
-if (!mutex_trylock(&bp->stats_lock)) {
+if (down_trylock(&bp->stats_lock)) {
 if (event == STATS_EVENT_UPDATE)
 return;
 
 DP(BNX2X_MSG_STATS,
 "Unlikely stats' lock contention [event %d]\n", event);
-mutex_lock(&bp->stats_lock);
+if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
+BNX2X_ERR("Failed to take stats lock [event %d]\n",
+event);
+return;
+}
 }
 
 bnx2x_stats_stm[state][event].action(bp);
 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 
-mutex_unlock(&bp->stats_lock);
+up(&bp->stats_lock);
 
 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp,
 /* Wait for statistics to end [while blocking further requests],
 * then run supplied function 'safely'.
 */
-mutex_lock(&bp->stats_lock);
+rc = down_timeout(&bp->stats_lock, HZ / 10);
+if (unlikely(rc)) {
+BNX2X_ERR("Failed to take statistics lock for safe execution\n");
+goto out_no_lock;
+}
 
 bnx2x_stats_comp(bp);
 while (bp->stats_pending && cnt--)
@@ -1988,7 +1996,7 @@ out:
 /* No need to restart statistics - if they're enabled, the timer
 * will restart the statistics.
 */
-mutex_unlock(&bp->stats_lock);
-
+up(&bp->stats_lock);
+out_no_lock:
 return rc;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 15b2d1647560..06b8c0d8fd3b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
 
 /* select tpa mode to request */
-if (!fp->disable_tpa) {
+if (fp->mode != TPA_MODE_DISABLED) {
 flags |= VFPF_QUEUE_FLG_TPA;
 flags |= VFPF_QUEUE_FLG_TPA_IPV6;
 if (fp->mode == TPA_MODE_GRO)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e7651b3c6c57..420949cc55aa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 phy_name = "external RGMII (no delay)";
 else
 phy_name = "external RGMII (TX delay)";
-reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
-reg |= RGMII_MODE_EN | id_mode_dis;
-bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 bcmgenet_sys_writel(priv,
 PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
 break;
@@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 return -EINVAL;
 }
 
+/* This is an external PHY (xMII), so we need to enable the RGMII
+* block for the interface to work
+*/
+if (priv->ext_phy) {
+reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+reg |= RGMII_MODE_EN | id_mode_dis;
+bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+}
+
 if (init)
 dev_info(kdev, "configuring instance for %s\n", phy_name);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1270b189a9a2..069952fa5d64 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18129,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
 rtnl_lock();
 
-tp->pcierr_recovery = true;
+/* We needn't recover from permanent error */
+if (state == pci_channel_io_frozen)
+tp->pcierr_recovery = true;
 
 /* We probably don't have netdev yet */
 if (!netdev || !netif_running(netdev))
 
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 594a2ab36d31..68f3c13c9ef6 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
 if (status == BFA_STATUS_OK)
 bfa_ioc_lpu_start(ioc);
 else
-bfa_nw_iocpf_timeout(ioc);
+bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
 
 return status;
 }
@@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
 }
 
 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
-bfa_nw_iocpf_timeout(ioc);
+bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
 } else {
 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
 mod_timer(&ioc->iocpf_timer, jiffies +
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 37072a83f9d6..caae6cb2bc1a 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev,
 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
 ((unsigned long)bnad));
 
-/* Now start the timer before calling IOC */
-mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
-jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
-
 /*
 * Start the chip
 * If the call back comes with error, we bail out.
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index ebf462d8082f..badea368bdc8 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
 u32 *bfi_image_size, char *fw_name)
 {
 const struct firmware *fw;
+u32 n;
 
 if (request_firmware(&fw, fw_name, &pdev->dev)) {
 pr_alert("Can't locate firmware %s\n", fw_name);
@@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
 *bfi_image_size = fw->size/sizeof(u32);
 bfi_fw = fw;
 
+/* Convert loaded firmware to host order as it is stored in file
+* as sequence of LE32 integers.
+*/
+for (n = 0; n < *bfi_image_size; n++)
+le32_to_cpus(*bfi_image + n);
+
 return *bfi_image;
 error:
 return NULL;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 9f5387249f24..fc646a41d548 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
| @@ -350,6 +350,9 @@ static int macb_mii_probe(struct net_device *dev) | |||
| 350 | else | 350 | else |
| 351 | phydev->supported &= PHY_BASIC_FEATURES; | 351 | phydev->supported &= PHY_BASIC_FEATURES; |
| 352 | 352 | ||
| 353 | if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) | ||
| 354 | phydev->supported &= ~SUPPORTED_1000baseT_Half; | ||
| 355 | |||
| 353 | phydev->advertising = phydev->supported; | 356 | phydev->advertising = phydev->supported; |
| 354 | 357 | ||
| 355 | bp->link = 0; | 358 | bp->link = 0; |
| @@ -707,6 +710,9 @@ static void gem_rx_refill(struct macb *bp) | |||
| 707 | 710 | ||
| 708 | /* properly align Ethernet header */ | 711 | /* properly align Ethernet header */ |
| 709 | skb_reserve(skb, NET_IP_ALIGN); | 712 | skb_reserve(skb, NET_IP_ALIGN); |
| 713 | } else { | ||
| 714 | bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); | ||
| 715 | bp->rx_ring[entry].ctrl = 0; | ||
| 710 | } | 716 | } |
| 711 | } | 717 | } |
| 712 | 718 | ||
| @@ -978,7 +984,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
| 978 | struct macb_queue *queue = dev_id; | 984 | struct macb_queue *queue = dev_id; |
| 979 | struct macb *bp = queue->bp; | 985 | struct macb *bp = queue->bp; |
| 980 | struct net_device *dev = bp->dev; | 986 | struct net_device *dev = bp->dev; |
| 981 | u32 status; | 987 | u32 status, ctrl; |
| 982 | 988 | ||
| 983 | status = queue_readl(queue, ISR); | 989 | status = queue_readl(queue, ISR); |
| 984 | 990 | ||
| @@ -1034,6 +1040,21 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
| 1034 | * add that if/when we get our hands on a full-blown MII PHY. | 1040 | * add that if/when we get our hands on a full-blown MII PHY. |
| 1035 | */ | 1041 | */ |
| 1036 | 1042 | ||
| 1043 | /* There is a hardware issue under heavy load where DMA can | ||
| 1044 | * stop, this causes endless "used buffer descriptor read" | ||
| 1045 | * interrupts but it can be cleared by re-enabling RX. See | ||
| 1046 | * the at91 manual, section 41.3.1 or the Zynq manual | ||
| 1047 | * section 16.7.4 for details. | ||
| 1048 | */ | ||
| 1049 | if (status & MACB_BIT(RXUBR)) { | ||
| 1050 | ctrl = macb_readl(bp, NCR); | ||
| 1051 | macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); | ||
| 1052 | macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); | ||
| 1053 | |||
| 1054 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | ||
| 1055 | macb_writel(bp, ISR, MACB_BIT(RXUBR)); | ||
| 1056 | } | ||
| 1057 | |||
| 1037 | if (status & MACB_BIT(ISR_ROVR)) { | 1058 | if (status & MACB_BIT(ISR_ROVR)) { |
| 1038 | /* We missed at least one packet */ | 1059 | /* We missed at least one packet */ |
| 1039 | if (macb_is_gem(bp)) | 1060 | if (macb_is_gem(bp)) |
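The RXUBR handling added in the hunk above works around the documented RX DMA stall by bouncing the receive-enable bit in NCR. Pulled out on its own as a condensed sketch of exactly those steps (macb_rx_stall_recover is not a real driver function):

	/* Clear an endless "used buffer descriptor read" condition: disable
	 * and re-enable RX, then ack the interrupt on hardware whose ISR is
	 * clear-on-write.
	 */
	static void macb_rx_stall_recover(struct macb *bp)
	{
		u32 ctrl = macb_readl(bp, NCR);

		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			macb_writel(bp, ISR, MACB_BIT(RXUBR));
	}
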
| @@ -1473,9 +1494,9 @@ static void macb_init_rings(struct macb *bp) | |||
| 1473 | for (i = 0; i < TX_RING_SIZE; i++) { | 1494 | for (i = 0; i < TX_RING_SIZE; i++) { |
| 1474 | bp->queues[0].tx_ring[i].addr = 0; | 1495 | bp->queues[0].tx_ring[i].addr = 0; |
| 1475 | bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); | 1496 | bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); |
| 1476 | bp->queues[0].tx_head = 0; | ||
| 1477 | bp->queues[0].tx_tail = 0; | ||
| 1478 | } | 1497 | } |
| 1498 | bp->queues[0].tx_head = 0; | ||
| 1499 | bp->queues[0].tx_tail = 0; | ||
| 1479 | bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); | 1500 | bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); |
| 1480 | 1501 | ||
| 1481 | bp->rx_tail = 0; | 1502 | bp->rx_tail = 0; |
| @@ -2681,6 +2702,14 @@ static const struct macb_config emac_config = { | |||
| 2681 | .init = at91ether_init, | 2702 | .init = at91ether_init, |
| 2682 | }; | 2703 | }; |
| 2683 | 2704 | ||
| 2705 | static const struct macb_config zynq_config = { | ||
| 2706 | .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | | ||
| 2707 | MACB_CAPS_NO_GIGABIT_HALF, | ||
| 2708 | .dma_burst_length = 16, | ||
| 2709 | .clk_init = macb_clk_init, | ||
| 2710 | .init = macb_init, | ||
| 2711 | }; | ||
| 2712 | |||
| 2684 | static const struct of_device_id macb_dt_ids[] = { | 2713 | static const struct of_device_id macb_dt_ids[] = { |
| 2685 | { .compatible = "cdns,at32ap7000-macb" }, | 2714 | { .compatible = "cdns,at32ap7000-macb" }, |
| 2686 | { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, | 2715 | { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config }, |
| @@ -2691,6 +2720,7 @@ static const struct of_device_id macb_dt_ids[] = { | |||
| 2691 | { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, | 2720 | { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, |
| 2692 | { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, | 2721 | { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, |
| 2693 | { .compatible = "cdns,emac", .data = &emac_config }, | 2722 | { .compatible = "cdns,emac", .data = &emac_config }, |
| 2723 | { .compatible = "cdns,zynq-gem", .data = &zynq_config }, | ||
| 2694 | { /* sentinel */ } | 2724 | { /* sentinel */ } |
| 2695 | }; | 2725 | }; |
| 2696 | MODULE_DEVICE_TABLE(of, macb_dt_ids); | 2726 | MODULE_DEVICE_TABLE(of, macb_dt_ids); |
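The new zynq_config is reached through the .data pointer of the matching of_device_id entry added above. A probe-time lookup would typically resemble the sketch below (macb_get_config and its error handling are illustrative, not the driver's actual probe path):

	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int macb_get_config(struct platform_device *pdev,
				   const struct macb_config **cfg)
	{
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, pdev->dev.of_node);
		if (!match || !match->data)
			return -ENODEV;	/* no compatible entry or no per-SoC data */

		*cfg = match->data;	/* &zynq_config for "cdns,zynq-gem" */
		return 0;
	}
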
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index eb7d76f7bf6a..24b1d9bcd865 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
| @@ -393,6 +393,7 @@ | |||
| 393 | #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 | 393 | #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 |
| 394 | #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 | 394 | #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 |
| 395 | #define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 | 395 | #define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 |
| 396 | #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 | ||
| 396 | #define MACB_CAPS_FIFO_MODE 0x10000000 | 397 | #define MACB_CAPS_FIFO_MODE 0x10000000 |
| 397 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 | 398 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 |
| 398 | #define MACB_CAPS_SG_DISABLED 0x40000000 | 399 | #define MACB_CAPS_SG_DISABLED 0x40000000 |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 5959e3ae72da..e8578a742f2a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
| @@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
| 492 | memoffset = (mtype * (edc_size * 1024 * 1024)); | 492 | memoffset = (mtype * (edc_size * 1024 * 1024)); |
| 493 | else { | 493 | else { |
| 494 | mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, | 494 | mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, |
| 495 | MA_EXT_MEMORY1_BAR_A)); | 495 | MA_EXT_MEMORY0_BAR_A)); |
| 496 | memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; | 496 | memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; |
| 497 | } | 497 | } |
| 498 | 498 | ||
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 28d9ca675a27..68d47b196dae 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c | |||
| @@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev, | |||
| 131 | { | 131 | { |
| 132 | struct enic *enic = netdev_priv(netdev); | 132 | struct enic *enic = netdev_priv(netdev); |
| 133 | struct vnic_devcmd_fw_info *fw_info; | 133 | struct vnic_devcmd_fw_info *fw_info; |
| 134 | int err; | ||
| 134 | 135 | ||
| 135 | enic_dev_fw_info(enic, &fw_info); | 136 | err = enic_dev_fw_info(enic, &fw_info); |
| 137 | /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info | ||
| 138 | * For other failures, like devcmd failure, we return previously | ||
| 139 | * recorded info. | ||
| 140 | */ | ||
| 141 | if (err == -ENOMEM) | ||
| 142 | return; | ||
| 136 | 143 | ||
| 137 | strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); | 144 | strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); |
| 138 | strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); | 145 | strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); |
| @@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev, | |||
| 181 | struct enic *enic = netdev_priv(netdev); | 188 | struct enic *enic = netdev_priv(netdev); |
| 182 | struct vnic_stats *vstats; | 189 | struct vnic_stats *vstats; |
| 183 | unsigned int i; | 190 | unsigned int i; |
| 184 | 191 | int err; | |
| 185 | enic_dev_stats_dump(enic, &vstats); | 192 | |
| 193 | err = enic_dev_stats_dump(enic, &vstats); | ||
| 194 | /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump | ||
| 195 | * For other failures, like devcmd failure, we return previously | ||
| 196 | * recorded stats. | ||
| 197 | */ | ||
| 198 | if (err == -ENOMEM) | ||
| 199 | return; | ||
| 186 | 200 | ||
| 187 | for (i = 0; i < enic_n_tx_stats; i++) | 201 | for (i = 0; i < enic_n_tx_stats; i++) |
| 188 | *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; | 202 | *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 204bd182473b..eadae1b412c6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
| @@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, | |||
| 615 | { | 615 | { |
| 616 | struct enic *enic = netdev_priv(netdev); | 616 | struct enic *enic = netdev_priv(netdev); |
| 617 | struct vnic_stats *stats; | 617 | struct vnic_stats *stats; |
| 618 | int err; | ||
| 618 | 619 | ||
| 619 | enic_dev_stats_dump(enic, &stats); | 620 | err = enic_dev_stats_dump(enic, &stats); |
| 621 | /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump | ||
| 622 | * For other failures, like devcmd failure, we return previously | ||
| 623 | * recorded stats. | ||
| 624 | */ | ||
| 625 | if (err == -ENOMEM) | ||
| 626 | return net_stats; | ||
| 620 | 627 | ||
| 621 | net_stats->tx_packets = stats->tx.tx_frames_ok; | 628 | net_stats->tx_packets = stats->tx.tx_frames_ok; |
| 622 | net_stats->tx_bytes = stats->tx.tx_bytes_ok; | 629 | net_stats->tx_bytes = stats->tx.tx_bytes_ok; |
| @@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | |||
| 1407 | */ | 1414 | */ |
| 1408 | enic_calc_int_moderation(enic, &enic->rq[rq]); | 1415 | enic_calc_int_moderation(enic, &enic->rq[rq]); |
| 1409 | 1416 | ||
| 1417 | enic_poll_unlock_napi(&enic->rq[rq]); | ||
| 1410 | if (work_done < work_to_do) { | 1418 | if (work_done < work_to_do) { |
| 1411 | 1419 | ||
| 1412 | /* Some work done, but not enough to stay in polling, | 1420 | /* Some work done, but not enough to stay in polling, |
| @@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | |||
| 1418 | enic_set_int_moderation(enic, &enic->rq[rq]); | 1426 | enic_set_int_moderation(enic, &enic->rq[rq]); |
| 1419 | vnic_intr_unmask(&enic->intr[intr]); | 1427 | vnic_intr_unmask(&enic->intr[intr]); |
| 1420 | } | 1428 | } |
| 1421 | enic_poll_unlock_napi(&enic->rq[rq]); | ||
| 1422 | 1429 | ||
| 1423 | return work_done; | 1430 | return work_done; |
| 1424 | } | 1431 | } |
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index 36a2ed606c91..c4b2183bf352 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c | |||
| @@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq, | |||
| 188 | struct vnic_rq_buf *buf; | 188 | struct vnic_rq_buf *buf; |
| 189 | u32 fetch_index; | 189 | u32 fetch_index; |
| 190 | unsigned int count = rq->ring.desc_count; | 190 | unsigned int count = rq->ring.desc_count; |
| 191 | int i; | ||
| 191 | 192 | ||
| 192 | buf = rq->to_clean; | 193 | buf = rq->to_clean; |
| 193 | 194 | ||
| 194 | while (vnic_rq_desc_used(rq) > 0) { | 195 | for (i = 0; i < rq->ring.desc_count; i++) { |
| 195 | |||
| 196 | (*buf_clean)(rq, buf); | 196 | (*buf_clean)(rq, buf); |
| 197 | 197 | buf = buf->next; | |
| 198 | buf = rq->to_clean = buf->next; | ||
| 199 | rq->ring.desc_avail++; | ||
| 200 | } | 198 | } |
| 199 | rq->ring.desc_avail = rq->ring.desc_count - 1; | ||
| 201 | 200 | ||
| 202 | /* Use current fetch_index as the ring starting point */ | 201 | /* Use current fetch_index as the ring starting point */ |
| 203 | fetch_index = ioread32(&rq->ctrl->fetch_index); | 202 | fetch_index = ioread32(&rq->ctrl->fetch_index); |
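The rewritten cleanup above walks every descriptor exactly once instead of relying on the used count, then reports desc_count - 1 available, following the usual one-empty-slot ring convention. The same drain pattern as a standalone sketch with generic names (ring_buf/ring_drain are illustrative, not vnic structures):

	/* Release every buffer in a fixed-size ring and return how many
	 * descriptors are available again afterwards.
	 */
	struct ring_buf {
		struct ring_buf *next;
		void *data;
	};

	static unsigned int ring_drain(struct ring_buf *start, unsigned int count,
				       void (*release)(struct ring_buf *buf))
	{
		struct ring_buf *buf = start;
		unsigned int i;

		for (i = 0; i < count; i++) {
			release(buf);
			buf = buf->next;
		}

		return count - 1;	/* keep one slot empty */
	}
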
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index fb140faeafb1..c5e1d0ac75f9 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
| @@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) | |||
| 1720 | total_size = buf_len; | 1720 | total_size = buf_len; |
| 1721 | 1721 | ||
| 1722 | get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; | 1722 | get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; |
| 1723 | get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, | 1723 | get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 1724 | get_fat_cmd.size, | 1724 | get_fat_cmd.size, |
| 1725 | &get_fat_cmd.dma); | 1725 | &get_fat_cmd.dma, GFP_ATOMIC); |
| 1726 | if (!get_fat_cmd.va) { | 1726 | if (!get_fat_cmd.va) { |
| 1727 | dev_err(&adapter->pdev->dev, | 1727 | dev_err(&adapter->pdev->dev, |
| 1728 | "Memory allocation failure while reading FAT data\n"); | 1728 | "Memory allocation failure while reading FAT data\n"); |
| @@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) | |||
| 1767 | log_offset += buf_size; | 1767 | log_offset += buf_size; |
| 1768 | } | 1768 | } |
| 1769 | err: | 1769 | err: |
| 1770 | pci_free_consistent(adapter->pdev, get_fat_cmd.size, | 1770 | dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, |
| 1771 | get_fat_cmd.va, get_fat_cmd.dma); | 1771 | get_fat_cmd.va, get_fat_cmd.dma); |
| 1772 | spin_unlock_bh(&adapter->mcc_lock); | 1772 | spin_unlock_bh(&adapter->mcc_lock); |
| 1773 | return status; | 1773 | return status; |
| 1774 | } | 1774 | } |
| @@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, | |||
| 2215 | return -EINVAL; | 2215 | return -EINVAL; |
| 2216 | 2216 | ||
| 2217 | cmd.size = sizeof(struct be_cmd_resp_port_type); | 2217 | cmd.size = sizeof(struct be_cmd_resp_port_type); |
| 2218 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 2218 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 2219 | GFP_ATOMIC); | ||
| 2219 | if (!cmd.va) { | 2220 | if (!cmd.va) { |
| 2220 | dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); | 2221 | dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); |
| 2221 | return -ENOMEM; | 2222 | return -ENOMEM; |
| 2222 | } | 2223 | } |
| 2223 | memset(cmd.va, 0, cmd.size); | ||
| 2224 | 2224 | ||
| 2225 | spin_lock_bh(&adapter->mcc_lock); | 2225 | spin_lock_bh(&adapter->mcc_lock); |
| 2226 | 2226 | ||
| @@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, | |||
| 2245 | } | 2245 | } |
| 2246 | err: | 2246 | err: |
| 2247 | spin_unlock_bh(&adapter->mcc_lock); | 2247 | spin_unlock_bh(&adapter->mcc_lock); |
| 2248 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 2248 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); |
| 2249 | return status; | 2249 | return status; |
| 2250 | } | 2250 | } |
| 2251 | 2251 | ||
| @@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) | |||
| 2720 | goto err; | 2720 | goto err; |
| 2721 | } | 2721 | } |
| 2722 | cmd.size = sizeof(struct be_cmd_req_get_phy_info); | 2722 | cmd.size = sizeof(struct be_cmd_req_get_phy_info); |
| 2723 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 2723 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 2724 | GFP_ATOMIC); | ||
| 2724 | if (!cmd.va) { | 2725 | if (!cmd.va) { |
| 2725 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | 2726 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); |
| 2726 | status = -ENOMEM; | 2727 | status = -ENOMEM; |
| @@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) | |||
| 2754 | BE_SUPPORTED_SPEED_1GBPS; | 2755 | BE_SUPPORTED_SPEED_1GBPS; |
| 2755 | } | 2756 | } |
| 2756 | } | 2757 | } |
| 2757 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 2758 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); |
| 2758 | err: | 2759 | err: |
| 2759 | spin_unlock_bh(&adapter->mcc_lock); | 2760 | spin_unlock_bh(&adapter->mcc_lock); |
| 2760 | return status; | 2761 | return status; |
| @@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) | |||
| 2805 | 2806 | ||
| 2806 | memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); | 2807 | memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); |
| 2807 | attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); | 2808 | attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); |
| 2808 | attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, | 2809 | attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 2809 | &attribs_cmd.dma); | 2810 | attribs_cmd.size, |
| 2811 | &attribs_cmd.dma, GFP_ATOMIC); | ||
| 2810 | if (!attribs_cmd.va) { | 2812 | if (!attribs_cmd.va) { |
| 2811 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | 2813 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); |
| 2812 | status = -ENOMEM; | 2814 | status = -ENOMEM; |
| @@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) | |||
| 2833 | err: | 2835 | err: |
| 2834 | mutex_unlock(&adapter->mbox_lock); | 2836 | mutex_unlock(&adapter->mbox_lock); |
| 2835 | if (attribs_cmd.va) | 2837 | if (attribs_cmd.va) |
| 2836 | pci_free_consistent(adapter->pdev, attribs_cmd.size, | 2838 | dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size, |
| 2837 | attribs_cmd.va, attribs_cmd.dma); | 2839 | attribs_cmd.va, attribs_cmd.dma); |
| 2838 | return status; | 2840 | return status; |
| 2839 | } | 2841 | } |
| 2840 | 2842 | ||
| @@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, | |||
| 2972 | 2974 | ||
| 2973 | memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); | 2975 | memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); |
| 2974 | get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); | 2976 | get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); |
| 2975 | get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, | 2977 | get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 2976 | get_mac_list_cmd.size, | 2978 | get_mac_list_cmd.size, |
| 2977 | &get_mac_list_cmd.dma); | 2979 | &get_mac_list_cmd.dma, |
| 2980 | GFP_ATOMIC); | ||
| 2978 | 2981 | ||
| 2979 | if (!get_mac_list_cmd.va) { | 2982 | if (!get_mac_list_cmd.va) { |
| 2980 | dev_err(&adapter->pdev->dev, | 2983 | dev_err(&adapter->pdev->dev, |
| @@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, | |||
| 3047 | 3050 | ||
| 3048 | out: | 3051 | out: |
| 3049 | spin_unlock_bh(&adapter->mcc_lock); | 3052 | spin_unlock_bh(&adapter->mcc_lock); |
| 3050 | pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, | 3053 | dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, |
| 3051 | get_mac_list_cmd.va, get_mac_list_cmd.dma); | 3054 | get_mac_list_cmd.va, get_mac_list_cmd.dma); |
| 3052 | return status; | 3055 | return status; |
| 3053 | } | 3056 | } |
| 3054 | 3057 | ||
| @@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, | |||
| 3101 | 3104 | ||
| 3102 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3105 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3103 | cmd.size = sizeof(struct be_cmd_req_set_mac_list); | 3106 | cmd.size = sizeof(struct be_cmd_req_set_mac_list); |
| 3104 | cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, | 3107 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3105 | &cmd.dma, GFP_KERNEL); | 3108 | GFP_KERNEL); |
| 3106 | if (!cmd.va) | 3109 | if (!cmd.va) |
| 3107 | return -ENOMEM; | 3110 | return -ENOMEM; |
| 3108 | 3111 | ||
| @@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) | |||
| 3291 | 3294 | ||
| 3292 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3295 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3293 | cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); | 3296 | cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); |
| 3294 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 3297 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3298 | GFP_ATOMIC); | ||
| 3295 | if (!cmd.va) { | 3299 | if (!cmd.va) { |
| 3296 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | 3300 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); |
| 3297 | status = -ENOMEM; | 3301 | status = -ENOMEM; |
| @@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) | |||
| 3326 | err: | 3330 | err: |
| 3327 | mutex_unlock(&adapter->mbox_lock); | 3331 | mutex_unlock(&adapter->mbox_lock); |
| 3328 | if (cmd.va) | 3332 | if (cmd.va) |
| 3329 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 3333 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, |
| 3334 | cmd.dma); | ||
| 3330 | return status; | 3335 | return status; |
| 3331 | 3336 | ||
| 3332 | } | 3337 | } |
| @@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) | |||
| 3340 | 3345 | ||
| 3341 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | 3346 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); |
| 3342 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | 3347 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); |
| 3343 | extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, | 3348 | extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 3344 | &extfat_cmd.dma); | 3349 | extfat_cmd.size, &extfat_cmd.dma, |
| 3350 | GFP_ATOMIC); | ||
| 3345 | if (!extfat_cmd.va) | 3351 | if (!extfat_cmd.va) |
| 3346 | return -ENOMEM; | 3352 | return -ENOMEM; |
| 3347 | 3353 | ||
| @@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) | |||
| 3363 | 3369 | ||
| 3364 | status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); | 3370 | status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); |
| 3365 | err: | 3371 | err: |
| 3366 | pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, | 3372 | dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, |
| 3367 | extfat_cmd.dma); | 3373 | extfat_cmd.dma); |
| 3368 | return status; | 3374 | return status; |
| 3369 | } | 3375 | } |
| 3370 | 3376 | ||
| @@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) | |||
| 3377 | 3383 | ||
| 3378 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | 3384 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); |
| 3379 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | 3385 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); |
| 3380 | extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, | 3386 | extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 3381 | &extfat_cmd.dma); | 3387 | extfat_cmd.size, &extfat_cmd.dma, |
| 3388 | GFP_ATOMIC); | ||
| 3382 | 3389 | ||
| 3383 | if (!extfat_cmd.va) { | 3390 | if (!extfat_cmd.va) { |
| 3384 | dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", | 3391 | dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", |
| @@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) | |||
| 3396 | level = cfgs->module[0].trace_lvl[j].dbg_lvl; | 3403 | level = cfgs->module[0].trace_lvl[j].dbg_lvl; |
| 3397 | } | 3404 | } |
| 3398 | } | 3405 | } |
| 3399 | pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, | 3406 | dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, |
| 3400 | extfat_cmd.dma); | 3407 | extfat_cmd.dma); |
| 3401 | err: | 3408 | err: |
| 3402 | return level; | 3409 | return level; |
| 3403 | } | 3410 | } |
| @@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) | |||
| 3595 | 3602 | ||
| 3596 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3603 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3597 | cmd.size = sizeof(struct be_cmd_resp_get_func_config); | 3604 | cmd.size = sizeof(struct be_cmd_resp_get_func_config); |
| 3598 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 3605 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3606 | GFP_ATOMIC); | ||
| 3599 | if (!cmd.va) { | 3607 | if (!cmd.va) { |
| 3600 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | 3608 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); |
| 3601 | status = -ENOMEM; | 3609 | status = -ENOMEM; |
| @@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) | |||
| 3635 | err: | 3643 | err: |
| 3636 | mutex_unlock(&adapter->mbox_lock); | 3644 | mutex_unlock(&adapter->mbox_lock); |
| 3637 | if (cmd.va) | 3645 | if (cmd.va) |
| 3638 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 3646 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, |
| 3647 | cmd.dma); | ||
| 3639 | return status; | 3648 | return status; |
| 3640 | } | 3649 | } |
| 3641 | 3650 | ||
| @@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, | |||
| 3656 | 3665 | ||
| 3657 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3666 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3658 | cmd.size = sizeof(struct be_cmd_resp_get_profile_config); | 3667 | cmd.size = sizeof(struct be_cmd_resp_get_profile_config); |
| 3659 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 3668 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3669 | GFP_ATOMIC); | ||
| 3660 | if (!cmd.va) | 3670 | if (!cmd.va) |
| 3661 | return -ENOMEM; | 3671 | return -ENOMEM; |
| 3662 | 3672 | ||
| @@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, | |||
| 3702 | res->vf_if_cap_flags = vf_res->cap_flags; | 3712 | res->vf_if_cap_flags = vf_res->cap_flags; |
| 3703 | err: | 3713 | err: |
| 3704 | if (cmd.va) | 3714 | if (cmd.va) |
| 3705 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 3715 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, |
| 3716 | cmd.dma); | ||
| 3706 | return status; | 3717 | return status; |
| 3707 | } | 3718 | } |
| 3708 | 3719 | ||
| @@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, | |||
| 3717 | 3728 | ||
| 3718 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3729 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3719 | cmd.size = sizeof(struct be_cmd_req_set_profile_config); | 3730 | cmd.size = sizeof(struct be_cmd_req_set_profile_config); |
| 3720 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 3731 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3732 | GFP_ATOMIC); | ||
| 3721 | if (!cmd.va) | 3733 | if (!cmd.va) |
| 3722 | return -ENOMEM; | 3734 | return -ENOMEM; |
| 3723 | 3735 | ||
| @@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, | |||
| 3733 | status = be_cmd_notify_wait(adapter, &wrb); | 3745 | status = be_cmd_notify_wait(adapter, &wrb); |
| 3734 | 3746 | ||
| 3735 | if (cmd.va) | 3747 | if (cmd.va) |
| 3736 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 3748 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, |
| 3749 | cmd.dma); | ||
| 3737 | return status; | 3750 | return status; |
| 3738 | } | 3751 | } |
| 3739 | 3752 | ||
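Every pci_alloc_consistent()/pci_free_consistent() pair above becomes dma_zalloc_coherent()/dma_free_coherent(), which zeroes the buffer (dropping the separate memset) and makes the allocation context explicit — GFP_ATOMIC here because several callers run under the mcc_lock spinlock. A minimal sketch of the converted pattern, using the be_dma_mem fields shown in the hunks (the helper names are hypothetical):

	/* Allocate a zeroed DMA-coherent command buffer; GFP_ATOMIC because
	 * the caller may be in atomic context (e.g. holding a spinlock).
	 */
	static int be_alloc_cmd_mem(struct device *dev, struct be_dma_mem *cmd,
				    size_t size)
	{
		cmd->size = size;
		cmd->va = dma_zalloc_coherent(dev, cmd->size, &cmd->dma,
					      GFP_ATOMIC);
		if (!cmd->va)
			return -ENOMEM;
		return 0;
	}

	static void be_free_cmd_mem(struct device *dev, struct be_dma_mem *cmd)
	{
		dma_free_coherent(dev, cmd->size, cmd->va, cmd->dma);
	}
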
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index b765c24625bf..2835dee5dc39 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c | |||
| @@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, | |||
| 264 | int status = 0; | 264 | int status = 0; |
| 265 | 265 | ||
| 266 | read_cmd.size = LANCER_READ_FILE_CHUNK; | 266 | read_cmd.size = LANCER_READ_FILE_CHUNK; |
| 267 | read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, | 267 | read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, |
| 268 | &read_cmd.dma); | 268 | &read_cmd.dma, GFP_ATOMIC); |
| 269 | 269 | ||
| 270 | if (!read_cmd.va) { | 270 | if (!read_cmd.va) { |
| 271 | dev_err(&adapter->pdev->dev, | 271 | dev_err(&adapter->pdev->dev, |
| @@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, | |||
| 289 | break; | 289 | break; |
| 290 | } | 290 | } |
| 291 | } | 291 | } |
| 292 | pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, | 292 | dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va, |
| 293 | read_cmd.dma); | 293 | read_cmd.dma); |
| 294 | 294 | ||
| 295 | return status; | 295 | return status; |
| 296 | } | 296 | } |
| @@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter) | |||
| 818 | }; | 818 | }; |
| 819 | 819 | ||
| 820 | ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); | 820 | ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); |
| 821 | ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, | 821 | ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 822 | &ddrdma_cmd.dma, GFP_KERNEL); | 822 | ddrdma_cmd.size, &ddrdma_cmd.dma, |
| 823 | GFP_KERNEL); | ||
| 823 | if (!ddrdma_cmd.va) | 824 | if (!ddrdma_cmd.va) |
| 824 | return -ENOMEM; | 825 | return -ENOMEM; |
| 825 | 826 | ||
| @@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev, | |||
| 941 | 942 | ||
| 942 | memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); | 943 | memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); |
| 943 | eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); | 944 | eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); |
| 944 | eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, | 945 | eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 945 | &eeprom_cmd.dma, GFP_KERNEL); | 946 | eeprom_cmd.size, &eeprom_cmd.dma, |
| 947 | GFP_KERNEL); | ||
| 946 | 948 | ||
| 947 | if (!eeprom_cmd.va) | 949 | if (!eeprom_cmd.va) |
| 948 | return -ENOMEM; | 950 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index fb0bc3c3620e..e43cc8a73ea7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -2358,11 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter) | |||
| 2358 | adapter->cfg_num_qs); | 2358 | adapter->cfg_num_qs); |
| 2359 | 2359 | ||
| 2360 | for_all_evt_queues(adapter, eqo, i) { | 2360 | for_all_evt_queues(adapter, eqo, i) { |
| 2361 | int numa_node = dev_to_node(&adapter->pdev->dev); | ||
| 2361 | if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) | 2362 | if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) |
| 2362 | return -ENOMEM; | 2363 | return -ENOMEM; |
| 2363 | cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev), | 2364 | cpumask_set_cpu(cpumask_local_spread(i, numa_node), |
| 2364 | eqo->affinity_mask); | 2365 | eqo->affinity_mask); |
| 2365 | |||
| 2366 | netif_napi_add(adapter->netdev, &eqo->napi, be_poll, | 2366 | netif_napi_add(adapter->netdev, &eqo->napi, be_poll, |
| 2367 | BE_NAPI_WEIGHT); | 2367 | BE_NAPI_WEIGHT); |
| 2368 | napi_hash_add(&eqo->napi); | 2368 | napi_hash_add(&eqo->napi); |
| @@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter, | |||
| 4605 | 4605 | ||
| 4606 | flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) | 4606 | flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) |
| 4607 | + LANCER_FW_DOWNLOAD_CHUNK; | 4607 | + LANCER_FW_DOWNLOAD_CHUNK; |
| 4608 | flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, | 4608 | flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, |
| 4609 | &flash_cmd.dma, GFP_KERNEL); | 4609 | &flash_cmd.dma, GFP_KERNEL); |
| 4610 | if (!flash_cmd.va) | 4610 | if (!flash_cmd.va) |
| 4611 | return -ENOMEM; | 4611 | return -ENOMEM; |
| 4612 | 4612 | ||
| @@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) | |||
| 4739 | } | 4739 | } |
| 4740 | 4740 | ||
| 4741 | flash_cmd.size = sizeof(struct be_cmd_write_flashrom); | 4741 | flash_cmd.size = sizeof(struct be_cmd_write_flashrom); |
| 4742 | flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, | 4742 | flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, |
| 4743 | GFP_KERNEL); | 4743 | GFP_KERNEL); |
| 4744 | if (!flash_cmd.va) | 4744 | if (!flash_cmd.va) |
| 4745 | return -ENOMEM; | 4745 | return -ENOMEM; |
| 4746 | 4746 | ||
| @@ -4846,7 +4846,8 @@ err: | |||
| 4846 | } | 4846 | } |
| 4847 | 4847 | ||
| 4848 | static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 4848 | static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
| 4849 | struct net_device *dev, u32 filter_mask) | 4849 | struct net_device *dev, u32 filter_mask, |
| 4850 | int nlflags) | ||
| 4850 | { | 4851 | { |
| 4851 | struct be_adapter *adapter = netdev_priv(dev); | 4852 | struct be_adapter *adapter = netdev_priv(dev); |
| 4852 | int status = 0; | 4853 | int status = 0; |
| @@ -4868,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
| 4868 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, | 4869 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, |
| 4869 | hsw_mode == PORT_FWD_TYPE_VEPA ? | 4870 | hsw_mode == PORT_FWD_TYPE_VEPA ? |
| 4870 | BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, | 4871 | BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, |
| 4871 | 0, 0); | 4872 | 0, 0, nlflags); |
| 4872 | } | 4873 | } |
| 4873 | 4874 | ||
| 4874 | #ifdef CONFIG_BE2NET_VXLAN | 4875 | #ifdef CONFIG_BE2NET_VXLAN |
| @@ -5290,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter) | |||
| 5290 | int status = 0; | 5291 | int status = 0; |
| 5291 | 5292 | ||
| 5292 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; | 5293 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; |
| 5293 | mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, | 5294 | mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, |
| 5294 | &mbox_mem_alloc->dma, | 5295 | &mbox_mem_alloc->dma, |
| 5295 | GFP_KERNEL); | 5296 | GFP_KERNEL); |
| 5296 | if (!mbox_mem_alloc->va) | 5297 | if (!mbox_mem_alloc->va) |
| 5297 | return -ENOMEM; | 5298 | return -ENOMEM; |
| 5298 | 5299 | ||
| 5299 | mbox_mem_align->size = sizeof(struct be_mcc_mailbox); | 5300 | mbox_mem_align->size = sizeof(struct be_mcc_mailbox); |
| 5300 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); | 5301 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); |
| 5301 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | 5302 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); |
| 5302 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); | ||
| 5303 | 5303 | ||
| 5304 | rx_filter->size = sizeof(struct be_cmd_req_rx_filter); | 5304 | rx_filter->size = sizeof(struct be_cmd_req_rx_filter); |
| 5305 | rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, | 5305 | rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, |
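The affinity hunk above replaces cpumask_set_cpu_local_first() with cpumask_local_spread(), which returns the i-th CPU when spreading over a NUMA node, preferring node-local CPUs first. A sketch of spreading per-queue affinity with it, assuming a generic per-queue IRQ array rather than be2net's exact structures:

	#include <linux/cpumask.h>
	#include <linux/interrupt.h>

	/* Give each queue's IRQ an affinity hint, filling the device's NUMA
	 * node first, then the remaining CPUs.
	 */
	static void set_queue_affinity_hints(int nqueues, const int *irqs,
					     int node)
	{
		int i;

		for (i = 0; i < nqueues; i++)
			irq_set_affinity_hint(irqs[i],
					      cpumask_of(cpumask_local_spread(i, node)));
	}
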
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index f6a3a7abd468..66d47e448e4d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev) | |||
| 988 | rcntl |= 0x40000000 | 0x00000020; | 988 | rcntl |= 0x40000000 | 0x00000020; |
| 989 | 989 | ||
| 990 | /* RGMII, RMII or MII */ | 990 | /* RGMII, RMII or MII */ |
| 991 | if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII) | 991 | if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || |
| 992 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || | ||
| 993 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || | ||
| 994 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) | ||
| 992 | rcntl |= (1 << 6); | 995 | rcntl |= (1 << 6); |
| 993 | else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) | 996 | else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) |
| 994 | rcntl |= (1 << 8); | 997 | rcntl |= (1 << 8); |
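The RCR programming above now treats all four RGMII variants (plain, -id, -rxid, -txid) identically, since the internal-delay variants still use the RGMII MAC interface. A small helper expressing the same test (a sketch only; later kernels offer phy_interface_mode_is_rgmii() for this, which is an assumption here, not something this patch adds):

	#include <linux/phy.h>

	/* True for any RGMII interface mode, regardless of which side
	 * inserts the clock delays.
	 */
	static bool fec_interface_is_rgmii(phy_interface_t iface)
	{
		return iface == PHY_INTERFACE_MODE_RGMII ||
		       iface == PHY_INTERFACE_MODE_RGMII_ID ||
		       iface == PHY_INTERFACE_MODE_RGMII_RXID ||
		       iface == PHY_INTERFACE_MODE_RGMII_TXID;
	}
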
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 291c87036e17..2a0dc127df3f 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
| @@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void) | |||
| 3347 | { | 3347 | { |
| 3348 | int ret = 0; | 3348 | int ret = 0; |
| 3349 | 3349 | ||
| 3350 | if (atomic_inc_and_test(&ehea_memory_hooks_registered)) | 3350 | if (atomic_inc_return(&ehea_memory_hooks_registered) > 1) |
| 3351 | return 0; | 3351 | return 0; |
| 3352 | 3352 | ||
| 3353 | ret = ehea_create_busmap(); | 3353 | ret = ehea_create_busmap(); |
| @@ -3381,12 +3381,14 @@ out3: | |||
| 3381 | out2: | 3381 | out2: |
| 3382 | unregister_reboot_notifier(&ehea_reboot_nb); | 3382 | unregister_reboot_notifier(&ehea_reboot_nb); |
| 3383 | out: | 3383 | out: |
| 3384 | atomic_dec(&ehea_memory_hooks_registered); | ||
| 3384 | return ret; | 3385 | return ret; |
| 3385 | } | 3386 | } |
| 3386 | 3387 | ||
| 3387 | static void ehea_unregister_memory_hooks(void) | 3388 | static void ehea_unregister_memory_hooks(void) |
| 3388 | { | 3389 | { |
| 3389 | if (atomic_read(&ehea_memory_hooks_registered)) | 3390 | /* Only remove the hooks if we've registered them */ |
| 3391 | if (atomic_read(&ehea_memory_hooks_registered) == 0) | ||
| 3390 | return; | 3392 | return; |
| 3391 | 3393 | ||
| 3392 | unregister_reboot_notifier(&ehea_reboot_nb); | 3394 | unregister_reboot_notifier(&ehea_reboot_nb); |
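The ehea fix above makes hook registration truly once-only: atomic_inc_and_test() only reports a zero result, so every caller used to fall through and register again, whereas atomic_inc_return() > 1 lets only the first caller proceed, and the new atomic_dec() rolls the count back on failure. The same idiom in isolation (names are illustrative):

	#include <linux/atomic.h>

	static atomic_t hooks_registered = ATOMIC_INIT(0);

	/* Only the first caller performs the registration work. */
	static int register_hooks_once(int (*do_register)(void))
	{
		int ret;

		if (atomic_inc_return(&hooks_registered) > 1)
			return 0;		/* already registered */

		ret = do_register();
		if (ret)
			atomic_dec(&hooks_registered);	/* undo on failure */

		return ret;
	}
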
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index de7919322190..b9df0cbd0a38 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
| @@ -2084,12 +2084,8 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev, | |||
| 2084 | 2084 | ||
| 2085 | static int emac_get_regs_len(struct emac_instance *dev) | 2085 | static int emac_get_regs_len(struct emac_instance *dev) |
| 2086 | { | 2086 | { |
| 2087 | if (emac_has_feature(dev, EMAC_FTR_EMAC4)) | ||
| 2088 | return sizeof(struct emac_ethtool_regs_subhdr) + | ||
| 2089 | EMAC4_ETHTOOL_REGS_SIZE(dev); | ||
| 2090 | else | ||
| 2091 | return sizeof(struct emac_ethtool_regs_subhdr) + | 2087 | return sizeof(struct emac_ethtool_regs_subhdr) + |
| 2092 | EMAC_ETHTOOL_REGS_SIZE(dev); | 2088 | sizeof(struct emac_regs); |
| 2093 | } | 2089 | } |
| 2094 | 2090 | ||
| 2095 | static int emac_ethtool_get_regs_len(struct net_device *ndev) | 2091 | static int emac_ethtool_get_regs_len(struct net_device *ndev) |
| @@ -2114,15 +2110,15 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf) | |||
| 2114 | struct emac_ethtool_regs_subhdr *hdr = buf; | 2110 | struct emac_ethtool_regs_subhdr *hdr = buf; |
| 2115 | 2111 | ||
| 2116 | hdr->index = dev->cell_index; | 2112 | hdr->index = dev->cell_index; |
| 2117 | if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { | 2113 | if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { |
| 2114 | hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER; | ||
| 2115 | } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { | ||
| 2118 | hdr->version = EMAC4_ETHTOOL_REGS_VER; | 2116 | hdr->version = EMAC4_ETHTOOL_REGS_VER; |
| 2119 | memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev)); | ||
| 2120 | return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev); | ||
| 2121 | } else { | 2117 | } else { |
| 2122 | hdr->version = EMAC_ETHTOOL_REGS_VER; | 2118 | hdr->version = EMAC_ETHTOOL_REGS_VER; |
| 2123 | memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev)); | ||
| 2124 | return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev); | ||
| 2125 | } | 2119 | } |
| 2120 | memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs)); | ||
| 2121 | return (void *)(hdr + 1) + sizeof(struct emac_regs); | ||
| 2126 | } | 2122 | } |
| 2127 | 2123 | ||
| 2128 | static void emac_ethtool_get_regs(struct net_device *ndev, | 2124 | static void emac_ethtool_get_regs(struct net_device *ndev, |
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 67f342a9f65e..28df37420da9 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h | |||
| @@ -461,10 +461,7 @@ struct emac_ethtool_regs_subhdr { | |||
| 461 | }; | 461 | }; |
| 462 | 462 | ||
| 463 | #define EMAC_ETHTOOL_REGS_VER 0 | 463 | #define EMAC_ETHTOOL_REGS_VER 0 |
| 464 | #define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ | 464 | #define EMAC4_ETHTOOL_REGS_VER 1 |
| 465 | (dev)->rsrc_regs.start + 1) | 465 | #define EMAC4SYNC_ETHTOOL_REGS_VER 2 |
| 466 | #define EMAC4_ETHTOOL_REGS_VER 1 | ||
| 467 | #define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ | ||
| 468 | (dev)->rsrc_regs.start + 1) | ||
| 469 | 466 | ||
| 470 | #endif /* __IBM_NEWEMAC_CORE_H */ | 467 | #endif /* __IBM_NEWEMAC_CORE_H */ |
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index cd7675ac5bf9..18134766a114 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
| @@ -1238,7 +1238,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1238 | return -EINVAL; | 1238 | return -EINVAL; |
| 1239 | 1239 | ||
| 1240 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) | 1240 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
| 1241 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) | 1241 | if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) |
| 1242 | break; | 1242 | break; |
| 1243 | 1243 | ||
| 1244 | if (i == IBMVETH_NUM_BUFF_POOLS) | 1244 | if (i == IBMVETH_NUM_BUFF_POOLS) |
| @@ -1257,7 +1257,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
| 1257 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { | 1257 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
| 1258 | adapter->rx_buff_pool[i].active = 1; | 1258 | adapter->rx_buff_pool[i].active = 1; |
| 1259 | 1259 | ||
| 1260 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { | 1260 | if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { |
| 1261 | dev->mtu = new_mtu; | 1261 | dev->mtu = new_mtu; |
| 1262 | vio_cmo_set_dev_desired(viodev, | 1262 | vio_cmo_set_dev_desired(viodev, |
| 1263 | ibmveth_get_desired_dma | 1263 | ibmveth_get_desired_dma |
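The ibmveth change turns a strict "<" into "<=": a pool whose buffer size exactly matches the MTU-plus-overhead requirement is usable, so the driver no longer skips to a needlessly larger pool or rejects an MTU that only the exact-fit pool could serve. The selection logic reduced to a sketch with generic names:

	/* Return the index of the smallest pool whose buffers fit 'needed'
	 * bytes, or -1 if none do.  An exact fit is acceptable, hence "<=".
	 */
	static int pick_smallest_pool(const unsigned int *buff_size, int npools,
				      unsigned int needed)
	{
		int i;

		for (i = 0; i < npools; i++)
			if (needed <= buff_size[i])
				return i;

		return -1;
	}
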
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 5d9ceb17b4cb..0abc942c966e 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h | |||
| @@ -40,6 +40,7 @@ | |||
| 40 | #include <linux/ptp_classify.h> | 40 | #include <linux/ptp_classify.h> |
| 41 | #include <linux/mii.h> | 41 | #include <linux/mii.h> |
| 42 | #include <linux/mdio.h> | 42 | #include <linux/mdio.h> |
| 43 | #include <linux/pm_qos.h> | ||
| 43 | #include "hw.h" | 44 | #include "hw.h" |
| 44 | 45 | ||
| 45 | struct e1000_info; | 46 | struct e1000_info; |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 1b0661e3573b..c754b2027281 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c | |||
| @@ -610,7 +610,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, | |||
| 610 | unsigned int total_bytes = 0, total_packets = 0; | 610 | unsigned int total_bytes = 0, total_packets = 0; |
| 611 | u16 cleaned_count = fm10k_desc_unused(rx_ring); | 611 | u16 cleaned_count = fm10k_desc_unused(rx_ring); |
| 612 | 612 | ||
| 613 | do { | 613 | while (likely(total_packets < budget)) { |
| 614 | union fm10k_rx_desc *rx_desc; | 614 | union fm10k_rx_desc *rx_desc; |
| 615 | 615 | ||
| 616 | /* return some buffers to hardware, one at a time is too slow */ | 616 | /* return some buffers to hardware, one at a time is too slow */ |
| @@ -659,7 +659,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, | |||
| 659 | 659 | ||
| 660 | /* update budget accounting */ | 660 | /* update budget accounting */ |
| 661 | total_packets++; | 661 | total_packets++; |
| 662 | } while (likely(total_packets < budget)); | 662 | } |
| 663 | 663 | ||
| 664 | /* place incomplete frames back on ring for completion */ | 664 | /* place incomplete frames back on ring for completion */ |
| 665 | rx_ring->skb = skb; | 665 | rx_ring->skb = skb; |
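fm10k's RX loop now tests the budget before processing a descriptor instead of after, so a zero budget (as some callers pass) cleans nothing rather than one packet. The control-flow difference in miniature (generic names, not the driver's real helpers):

	/* Budget-bounded receive loop: with "while" the budget is honoured
	 * even when it is zero, which the old do/while form could not
	 * guarantee.
	 */
	static unsigned int rx_poll(unsigned int budget,
				    bool (*clean_one_packet)(void))
	{
		unsigned int total_packets = 0;

		while (total_packets < budget) {
			if (!clean_one_packet())	/* ring empty */
				break;
			total_packets++;
		}

		return total_packets;
	}
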
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 33c35d3b7420..5d47307121ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h | |||
| @@ -317,6 +317,7 @@ struct i40e_pf { | |||
| 317 | #endif | 317 | #endif |
| 318 | #define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) | 318 | #define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) |
| 319 | #define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) | 319 | #define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) |
| 320 | #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) | ||
| 320 | 321 | ||
| 321 | /* tracks features that get auto disabled by errors */ | 322 | /* tracks features that get auto disabled by errors */ |
| 322 | u64 auto_disable_flags; | 323 | u64 auto_disable_flags; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 34170eabca7d..da0faf478af0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c | |||
| @@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, | |||
| 1021 | goto command_write_done; | 1021 | goto command_write_done; |
| 1022 | } | 1022 | } |
| 1023 | 1023 | ||
| 1024 | /* By default we are in VEPA mode, if this is the first VF/VMDq | ||
| 1025 | * VSI to be added switch to VEB mode. | ||
| 1026 | */ | ||
| 1027 | if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { | ||
| 1028 | pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; | ||
| 1029 | i40e_do_reset_safe(pf, | ||
| 1030 | BIT_ULL(__I40E_PF_RESET_REQUESTED)); | ||
| 1031 | } | ||
| 1032 | |||
| 1024 | vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); | 1033 | vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); |
| 1025 | if (vsi) | 1034 | if (vsi) |
| 1026 | dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", | 1035 | dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 24481cd7e59a..5b5bea159bd5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) | |||
| 6097 | if (ret) | 6097 | if (ret) |
| 6098 | goto end_reconstitute; | 6098 | goto end_reconstitute; |
| 6099 | 6099 | ||
| 6100 | if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) | ||
| 6101 | veb->bridge_mode = BRIDGE_MODE_VEB; | ||
| 6102 | else | ||
| 6103 | veb->bridge_mode = BRIDGE_MODE_VEPA; | ||
| 6100 | i40e_config_bridge_mode(veb); | 6104 | i40e_config_bridge_mode(veb); |
| 6101 | 6105 | ||
| 6102 | /* create the remaining VSIs attached to this VEB */ | 6106 | /* create the remaining VSIs attached to this VEB */ |
| @@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, | |||
| 8031 | } else if (mode != veb->bridge_mode) { | 8035 | } else if (mode != veb->bridge_mode) { |
| 8032 | /* Existing HW bridge but different mode needs reset */ | 8036 | /* Existing HW bridge but different mode needs reset */ |
| 8033 | veb->bridge_mode = mode; | 8037 | veb->bridge_mode = mode; |
| 8034 | i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); | 8038 | /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ |
| 8039 | if (mode == BRIDGE_MODE_VEB) | ||
| 8040 | pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; | ||
| 8041 | else | ||
| 8042 | pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; | ||
| 8043 | i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); | ||
| 8035 | break; | 8044 | break; |
| 8036 | } | 8045 | } |
| 8037 | } | 8046 | } |
| @@ -8053,10 +8062,10 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, | |||
| 8053 | #ifdef HAVE_BRIDGE_FILTER | 8062 | #ifdef HAVE_BRIDGE_FILTER |
| 8054 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 8063 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
| 8055 | struct net_device *dev, | 8064 | struct net_device *dev, |
| 8056 | u32 __always_unused filter_mask) | 8065 | u32 __always_unused filter_mask, int nlflags) |
| 8057 | #else | 8066 | #else |
| 8058 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 8067 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
| 8059 | struct net_device *dev) | 8068 | struct net_device *dev, int nlflags) |
| 8060 | #endif /* HAVE_BRIDGE_FILTER */ | 8069 | #endif /* HAVE_BRIDGE_FILTER */ |
| 8061 | { | 8070 | { |
| 8062 | struct i40e_netdev_priv *np = netdev_priv(dev); | 8071 | struct i40e_netdev_priv *np = netdev_priv(dev); |
| @@ -8078,7 +8087,8 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
| 8078 | if (!veb) | 8087 | if (!veb) |
| 8079 | return 0; | 8088 | return 0; |
| 8080 | 8089 | ||
| 8081 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); | 8090 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, |
| 8091 | nlflags); | ||
| 8082 | } | 8092 | } |
| 8083 | #endif /* HAVE_BRIDGE_ATTRIBS */ | 8093 | #endif /* HAVE_BRIDGE_ATTRIBS */ |
| 8084 | 8094 | ||
| @@ -8342,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) | |||
| 8342 | ctxt.uplink_seid = vsi->uplink_seid; | 8352 | ctxt.uplink_seid = vsi->uplink_seid; |
| 8343 | ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; | 8353 | ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; |
| 8344 | ctxt.flags = I40E_AQ_VSI_TYPE_PF; | 8354 | ctxt.flags = I40E_AQ_VSI_TYPE_PF; |
| 8345 | if (i40e_is_vsi_uplink_mode_veb(vsi)) { | 8355 | if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && |
| 8356 | (i40e_is_vsi_uplink_mode_veb(vsi))) { | ||
| 8346 | ctxt.info.valid_sections |= | 8357 | ctxt.info.valid_sections |= |
| 8347 | cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | 8358 | cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); |
| 8348 | ctxt.info.switch_id = | 8359 | ctxt.info.switch_id = |
| 8349 | cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | 8360 | cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); |
| 8350 | } | 8361 | } |
| 8351 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); | 8362 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); |
| 8352 | break; | 8363 | break; |
| @@ -8745,6 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, | |||
| 8745 | __func__); | 8756 | __func__); |
| 8746 | return NULL; | 8757 | return NULL; |
| 8747 | } | 8758 | } |
| 8759 | /* We come up by default in VEPA mode if SRIOV is not | ||
| 8760 | * already enabled, in which case we can't force VEPA | ||
| 8761 | * mode. | ||
| 8762 | */ | ||
| 8763 | if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { | ||
| 8764 | veb->bridge_mode = BRIDGE_MODE_VEPA; | ||
| 8765 | pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; | ||
| 8766 | } | ||
| 8748 | i40e_config_bridge_mode(veb); | 8767 | i40e_config_bridge_mode(veb); |
| 8749 | } | 8768 | } |
| 8750 | for (i = 0; i < I40E_MAX_VEB && !veb; i++) { | 8769 | for (i = 0; i < I40E_MAX_VEB && !veb; i++) { |
| @@ -9855,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 9855 | goto err_switch_setup; | 9874 | goto err_switch_setup; |
| 9856 | } | 9875 | } |
| 9857 | 9876 | ||
| 9877 | #ifdef CONFIG_PCI_IOV | ||
| 9878 | /* prep for VF support */ | ||
| 9879 | if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && | ||
| 9880 | (pf->flags & I40E_FLAG_MSIX_ENABLED) && | ||
| 9881 | !test_bit(__I40E_BAD_EEPROM, &pf->state)) { | ||
| 9882 | if (pci_num_vf(pdev)) | ||
| 9883 | pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; | ||
| 9884 | } | ||
| 9885 | #endif | ||
| 9858 | err = i40e_setup_pf_switch(pf, false); | 9886 | err = i40e_setup_pf_switch(pf, false); |
| 9859 | if (err) { | 9887 | if (err) { |
| 9860 | dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); | 9888 | dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 4bd3a80aba82..9d95042d5a0f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
| @@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) | |||
| 2410 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet | 2410 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet |
| 2411 | * @skb: send buffer | 2411 | * @skb: send buffer |
| 2412 | * @tx_flags: collected send information | 2412 | * @tx_flags: collected send information |
| 2413 | * @hdr_len: size of the packet header | ||
| 2414 | * | 2413 | * |
| 2415 | * Note: Our HW can't scatter-gather more than 8 fragments to build | 2414 | * Note: Our HW can't scatter-gather more than 8 fragments to build |
| 2416 | * a packet on the wire and so we need to figure out the cases where we | 2415 | * a packet on the wire and so we need to figure out the cases where we |
| 2417 | * need to linearize the skb. | 2416 | * need to linearize the skb. |
| 2418 | **/ | 2417 | **/ |
| 2419 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | 2418 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) |
| 2420 | const u8 hdr_len) | ||
| 2421 | { | 2419 | { |
| 2422 | struct skb_frag_struct *frag; | 2420 | struct skb_frag_struct *frag; |
| 2423 | bool linearize = false; | 2421 | bool linearize = false; |
| @@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | |||
| 2429 | gso_segs = skb_shinfo(skb)->gso_segs; | 2427 | gso_segs = skb_shinfo(skb)->gso_segs; |
| 2430 | 2428 | ||
| 2431 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { | 2429 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { |
| 2432 | u16 j = 1; | 2430 | u16 j = 0; |
| 2433 | 2431 | ||
| 2434 | if (num_frags < (I40E_MAX_BUFFER_TXD)) | 2432 | if (num_frags < (I40E_MAX_BUFFER_TXD)) |
| 2435 | goto linearize_chk_done; | 2433 | goto linearize_chk_done; |
| @@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | |||
| 2440 | goto linearize_chk_done; | 2438 | goto linearize_chk_done; |
| 2441 | } | 2439 | } |
| 2442 | frag = &skb_shinfo(skb)->frags[0]; | 2440 | frag = &skb_shinfo(skb)->frags[0]; |
| 2443 | size = hdr_len; | ||
| 2444 | /* we might still have more fragments per segment */ | 2441 | /* we might still have more fragments per segment */ |
| 2445 | do { | 2442 | do { |
| 2446 | size += skb_frag_size(frag); | 2443 | size += skb_frag_size(frag); |
| 2447 | frag++; j++; | 2444 | frag++; j++; |
| 2445 | if ((size >= skb_shinfo(skb)->gso_size) && | ||
| 2446 | (j < I40E_MAX_BUFFER_TXD)) { | ||
| 2447 | size = (size % skb_shinfo(skb)->gso_size); | ||
| 2448 | j = (size) ? 1 : 0; | ||
| 2449 | } | ||
| 2448 | if (j == I40E_MAX_BUFFER_TXD) { | 2450 | if (j == I40E_MAX_BUFFER_TXD) { |
| 2449 | if (size < skb_shinfo(skb)->gso_size) { | 2451 | linearize = true; |
| 2450 | linearize = true; | 2452 | break; |
| 2451 | break; | ||
| 2452 | } | ||
| 2453 | j = 1; | ||
| 2454 | size -= skb_shinfo(skb)->gso_size; | ||
| 2455 | if (size) | ||
| 2456 | j++; | ||
| 2457 | size += hdr_len; | ||
| 2458 | } | 2453 | } |
| 2459 | num_frags--; | 2454 | num_frags--; |
| 2460 | } while (num_frags); | 2455 | } while (num_frags); |
| @@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
| 2724 | if (tsyn) | 2719 | if (tsyn) |
| 2725 | tx_flags |= I40E_TX_FLAGS_TSYN; | 2720 | tx_flags |= I40E_TX_FLAGS_TSYN; |
| 2726 | 2721 | ||
| 2727 | if (i40e_chk_linearize(skb, tx_flags, hdr_len)) | 2722 | if (i40e_chk_linearize(skb, tx_flags)) |
| 2728 | if (skb_linearize(skb)) | 2723 | if (skb_linearize(skb)) |
| 2729 | goto out_drop; | 2724 | goto out_drop; |
| 2730 | 2725 | ||
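The reworked i40e_chk_linearize() counts how many descriptors each TSO segment would consume as it walks the fragments, carrying leftover bytes into the next segment by taking the running size modulo gso_size, and requests linearization only when a single segment would exceed the 8-descriptor hardware limit. The core accounting as a standalone sketch with generic names (MAX_DESC_PER_SEG stands in for I40E_MAX_BUFFER_TXD; the gso_segs early-out of the real function is omitted):

	#define MAX_DESC_PER_SEG	8	/* hardware scatter-gather limit */

	/* Walk the fragment sizes of a TSO skb and decide whether any single
	 * MSS-sized segment would need more than MAX_DESC_PER_SEG
	 * descriptors.
	 */
	static bool needs_linearize(const unsigned int *frag_size, int num_frags,
				    unsigned int gso_size)
	{
		unsigned int size = 0;
		int i, j = 0;

		if (num_frags < MAX_DESC_PER_SEG)
			return false;

		for (i = 0; i < num_frags; i++) {
			size += frag_size[i];
			j++;
			if (size >= gso_size && j < MAX_DESC_PER_SEG) {
				/* one or more full segments completed; carry
				 * the leftover bytes into the next segment
				 */
				size %= gso_size;
				j = size ? 1 : 0;
			}
			if (j == MAX_DESC_PER_SEG)
				return true;
		}

		return false;
	}
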
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 78d1c4ff565e..4e9376da0518 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |||
| @@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) | |||
| 1018 | { | 1018 | { |
| 1019 | struct i40e_pf *pf = pci_get_drvdata(pdev); | 1019 | struct i40e_pf *pf = pci_get_drvdata(pdev); |
| 1020 | 1020 | ||
| 1021 | if (num_vfs) | 1021 | if (num_vfs) { |
| 1022 | if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { | ||
| 1023 | pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; | ||
| 1024 | i40e_do_reset_safe(pf, | ||
| 1025 | BIT_ULL(__I40E_PF_RESET_REQUESTED)); | ||
| 1026 | } | ||
| 1022 | return i40e_pci_sriov_enable(pdev, num_vfs); | 1027 | return i40e_pci_sriov_enable(pdev, num_vfs); |
| 1028 | } | ||
| 1023 | 1029 | ||
| 1024 | if (!pci_vfs_assigned(pf->pdev)) { | 1030 | if (!pci_vfs_assigned(pf->pdev)) { |
| 1025 | i40e_free_vfs(pf); | 1031 | i40e_free_vfs(pf); |
| 1032 | pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; | ||
| 1033 | i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); | ||
| 1026 | } else { | 1034 | } else { |
| 1027 | dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); | 1035 | dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); |
| 1028 | return -EINVAL; | 1036 | return -EINVAL; |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index b077e02a0cc7..458fbb421090 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c | |||
| @@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, | |||
| 1619 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet | 1619 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet |
| 1620 | * @skb: send buffer | 1620 | * @skb: send buffer |
| 1621 | * @tx_flags: collected send information | 1621 | * @tx_flags: collected send information |
| 1622 | * @hdr_len: size of the packet header | ||
| 1623 | * | 1622 | * |
| 1624 | * Note: Our HW can't scatter-gather more than 8 fragments to build | 1623 | * Note: Our HW can't scatter-gather more than 8 fragments to build |
| 1625 | * a packet on the wire and so we need to figure out the cases where we | 1624 | * a packet on the wire and so we need to figure out the cases where we |
| 1626 | * need to linearize the skb. | 1625 | * need to linearize the skb. |
| 1627 | **/ | 1626 | **/ |
| 1628 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | 1627 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) |
| 1629 | const u8 hdr_len) | ||
| 1630 | { | 1628 | { |
| 1631 | struct skb_frag_struct *frag; | 1629 | struct skb_frag_struct *frag; |
| 1632 | bool linearize = false; | 1630 | bool linearize = false; |
| @@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | |||
| 1638 | gso_segs = skb_shinfo(skb)->gso_segs; | 1636 | gso_segs = skb_shinfo(skb)->gso_segs; |
| 1639 | 1637 | ||
| 1640 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { | 1638 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { |
| 1641 | u16 j = 1; | 1639 | u16 j = 0; |
| 1642 | 1640 | ||
| 1643 | if (num_frags < (I40E_MAX_BUFFER_TXD)) | 1641 | if (num_frags < (I40E_MAX_BUFFER_TXD)) |
| 1644 | goto linearize_chk_done; | 1642 | goto linearize_chk_done; |
| @@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | |||
| 1649 | goto linearize_chk_done; | 1647 | goto linearize_chk_done; |
| 1650 | } | 1648 | } |
| 1651 | frag = &skb_shinfo(skb)->frags[0]; | 1649 | frag = &skb_shinfo(skb)->frags[0]; |
| 1652 | size = hdr_len; | ||
| 1653 | /* we might still have more fragments per segment */ | 1650 | /* we might still have more fragments per segment */ |
| 1654 | do { | 1651 | do { |
| 1655 | size += skb_frag_size(frag); | 1652 | size += skb_frag_size(frag); |
| 1656 | frag++; j++; | 1653 | frag++; j++; |
| 1654 | if ((size >= skb_shinfo(skb)->gso_size) && | ||
| 1655 | (j < I40E_MAX_BUFFER_TXD)) { | ||
| 1656 | size = (size % skb_shinfo(skb)->gso_size); | ||
| 1657 | j = (size) ? 1 : 0; | ||
| 1658 | } | ||
| 1657 | if (j == I40E_MAX_BUFFER_TXD) { | 1659 | if (j == I40E_MAX_BUFFER_TXD) { |
| 1658 | if (size < skb_shinfo(skb)->gso_size) { | 1660 | linearize = true; |
| 1659 | linearize = true; | 1661 | break; |
| 1660 | break; | ||
| 1661 | } | ||
| 1662 | j = 1; | ||
| 1663 | size -= skb_shinfo(skb)->gso_size; | ||
| 1664 | if (size) | ||
| 1665 | j++; | ||
| 1666 | size += hdr_len; | ||
| 1667 | } | 1662 | } |
| 1668 | num_frags--; | 1663 | num_frags--; |
| 1669 | } while (num_frags); | 1664 | } while (num_frags); |
| @@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
| 1950 | else if (tso) | 1945 | else if (tso) |
| 1951 | tx_flags |= I40E_TX_FLAGS_TSO; | 1946 | tx_flags |= I40E_TX_FLAGS_TSO; |
| 1952 | 1947 | ||
| 1953 | if (i40e_chk_linearize(skb, tx_flags, hdr_len)) | 1948 | if (i40e_chk_linearize(skb, tx_flags)) |
| 1954 | if (skb_linearize(skb)) | 1949 | if (skb_linearize(skb)) |
| 1955 | goto out_drop; | 1950 | goto out_drop; |
| 1956 | 1951 | ||
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8457d0306e3a..a0a9b1fcb5e8 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
| @@ -1036,7 +1036,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) | |||
| 1036 | adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; | 1036 | adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; |
| 1037 | 1037 | ||
| 1038 | if (q_vector->rx.ring) | 1038 | if (q_vector->rx.ring) |
| 1039 | adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL; | 1039 | adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; |
| 1040 | 1040 | ||
| 1041 | netif_napi_del(&q_vector->napi); | 1041 | netif_napi_del(&q_vector->napi); |
| 1042 | 1042 | ||
| @@ -1207,6 +1207,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, | |||
| 1207 | q_vector = adapter->q_vector[v_idx]; | 1207 | q_vector = adapter->q_vector[v_idx]; |
| 1208 | if (!q_vector) | 1208 | if (!q_vector) |
| 1209 | q_vector = kzalloc(size, GFP_KERNEL); | 1209 | q_vector = kzalloc(size, GFP_KERNEL); |
| 1210 | else | ||
| 1211 | memset(q_vector, 0, size); | ||
| 1210 | if (!q_vector) | 1212 | if (!q_vector) |
| 1211 | return -ENOMEM; | 1213 | return -ENOMEM; |
| 1212 | 1214 | ||
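The igb hunk above reuses a q_vector allocation across a reconfiguration, so the recycled memory now has to be cleared by hand; kzalloc() only zeroes memory it has just allocated, and stale fields were otherwise carried into the new configuration. A small user-space sketch of the reuse-or-allocate pattern (get_zeroed is a made-up name):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a zeroed object of the requested size: recycled buffers must be
 * wiped explicitly, only fresh allocations come back already zeroed. */
static void *get_zeroed(void *cached, size_t size)
{
	if (!cached)
		return calloc(1, size);	/* stands in for kzalloc(size, GFP_KERNEL) */
	memset(cached, 0, size);	/* stands in for the added memset() */
	return cached;
}

int main(void)
{
	int *v = get_zeroed(NULL, sizeof(*v));

	if (!v)
		return 1;
	*v = 42;				/* "configure" the vector */
	v = get_zeroed(v, sizeof(*v));		/* reconfigure */
	printf("%d\n", *v);			/* prints 0, not 42 */
	free(v);
	return 0;
}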
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e3b9b63ad010..c3a9392cbc19 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c | |||
| @@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, | |||
| 538 | igb->perout[i].start.tv_nsec = rq->perout.start.nsec; | 538 | igb->perout[i].start.tv_nsec = rq->perout.start.nsec; |
| 539 | igb->perout[i].period.tv_sec = ts.tv_sec; | 539 | igb->perout[i].period.tv_sec = ts.tv_sec; |
| 540 | igb->perout[i].period.tv_nsec = ts.tv_nsec; | 540 | igb->perout[i].period.tv_nsec = ts.tv_nsec; |
| 541 | wr32(trgttiml, rq->perout.start.sec); | 541 | wr32(trgttimh, rq->perout.start.sec); |
| 542 | wr32(trgttimh, rq->perout.start.nsec); | 542 | wr32(trgttiml, rq->perout.start.nsec); |
| 543 | tsauxc |= tsauxc_mask; | 543 | tsauxc |= tsauxc_mask; |
| 544 | tsim |= tsim_mask; | 544 | tsim |= tsim_mask; |
| 545 | } else { | 545 | } else { |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d3f4b0ceb3f7..5be12a00e1f4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -8044,7 +8044,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, | |||
| 8044 | 8044 | ||
| 8045 | static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 8045 | static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
| 8046 | struct net_device *dev, | 8046 | struct net_device *dev, |
| 8047 | u32 filter_mask) | 8047 | u32 filter_mask, int nlflags) |
| 8048 | { | 8048 | { |
| 8049 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 8049 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
| 8050 | 8050 | ||
| @@ -8052,7 +8052,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
| 8052 | return 0; | 8052 | return 0; |
| 8053 | 8053 | ||
| 8054 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, | 8054 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, |
| 8055 | adapter->bridge_mode, 0, 0); | 8055 | adapter->bridge_mode, 0, 0, nlflags); |
| 8056 | } | 8056 | } |
| 8057 | 8057 | ||
| 8058 | static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) | 8058 | static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a16d267fbce4..e71cdde9cb01 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
| @@ -3612,7 +3612,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
| 3612 | u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); | 3612 | u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); |
| 3613 | 3613 | ||
| 3614 | if (!dst_mac || is_link_local_ether_addr(dst_mac)) { | 3614 | if (!dst_mac || is_link_local_ether_addr(dst_mac)) { |
| 3615 | dev_kfree_skb(skb); | 3615 | dev_kfree_skb_any(skb); |
| 3616 | return NETDEV_TX_OK; | 3616 | return NETDEV_TX_OK; |
| 3617 | } | 3617 | } |
| 3618 | 3618 | ||
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index af829c578400..7ace07dad6a3 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c | |||
| @@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) | |||
| 1508 | np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); | 1508 | np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); |
| 1509 | if (!np) { | 1509 | if (!np) { |
| 1510 | dev_err(&pdev->dev, "missing phy-handle\n"); | 1510 | dev_err(&pdev->dev, "missing phy-handle\n"); |
| 1511 | return -EINVAL; | 1511 | err = -EINVAL; |
| 1512 | goto err_netdev; | ||
| 1512 | } | 1513 | } |
| 1513 | of_property_read_u32(np, "reg", &pep->phy_addr); | 1514 | of_property_read_u32(np, "reg", &pep->phy_addr); |
| 1514 | pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); | 1515 | pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); |
| @@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) | |||
| 1526 | pep->smi_bus = mdiobus_alloc(); | 1527 | pep->smi_bus = mdiobus_alloc(); |
| 1527 | if (pep->smi_bus == NULL) { | 1528 | if (pep->smi_bus == NULL) { |
| 1528 | err = -ENOMEM; | 1529 | err = -ENOMEM; |
| 1529 | goto err_base; | 1530 | goto err_netdev; |
| 1530 | } | 1531 | } |
| 1531 | pep->smi_bus->priv = pep; | 1532 | pep->smi_bus->priv = pep; |
| 1532 | pep->smi_bus->name = "pxa168_eth smi"; | 1533 | pep->smi_bus->name = "pxa168_eth smi"; |
| @@ -1551,13 +1552,10 @@ err_mdiobus: | |||
| 1551 | mdiobus_unregister(pep->smi_bus); | 1552 | mdiobus_unregister(pep->smi_bus); |
| 1552 | err_free_mdio: | 1553 | err_free_mdio: |
| 1553 | mdiobus_free(pep->smi_bus); | 1554 | mdiobus_free(pep->smi_bus); |
| 1554 | err_base: | ||
| 1555 | iounmap(pep->base); | ||
| 1556 | err_netdev: | 1555 | err_netdev: |
| 1557 | free_netdev(dev); | 1556 | free_netdev(dev); |
| 1558 | err_clk: | 1557 | err_clk: |
| 1559 | clk_disable(clk); | 1558 | clk_disable_unprepare(clk); |
| 1560 | clk_put(clk); | ||
| 1561 | return err; | 1559 | return err; |
| 1562 | } | 1560 | } |
| 1563 | 1561 | ||
| @@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev) | |||
| 1574 | if (pep->phy) | 1572 | if (pep->phy) |
| 1575 | phy_disconnect(pep->phy); | 1573 | phy_disconnect(pep->phy); |
| 1576 | if (pep->clk) { | 1574 | if (pep->clk) { |
| 1577 | clk_disable(pep->clk); | 1575 | clk_disable_unprepare(pep->clk); |
| 1578 | clk_put(pep->clk); | ||
| 1579 | pep->clk = NULL; | ||
| 1580 | } | 1576 | } |
| 1581 | 1577 | ||
| 1582 | iounmap(pep->base); | ||
| 1583 | pep->base = NULL; | ||
| 1584 | mdiobus_unregister(pep->smi_bus); | 1578 | mdiobus_unregister(pep->smi_bus); |
| 1585 | mdiobus_free(pep->smi_bus); | 1579 | mdiobus_free(pep->smi_bus); |
| 1586 | unregister_netdev(dev); | 1580 | unregister_netdev(dev); |
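Both pxa168 hunks tighten the probe/remove cleanup: the early return on a missing phy-handle used to leak the freshly allocated net_device, the explicit iounmap() calls in the error and remove paths are dropped (the register mapping appears to be device-managed), and clk_disable()/clk_put() becomes clk_disable_unprepare(). The first of those fixes is the classic goto-unwind rule, sketched below in plain C with illustrative names (for the demo the success path frees everything too):

#include <stdio.h>
#include <stdlib.h>

/* Acquire resources in order, release them in reverse order through one
 * chain of labels.  An early "return" between two acquisitions -- as the
 * old phy-handle check did -- skips the chain and leaks what came before. */
static int fake_probe(void)
{
	void *netdev, *mdio;
	int err;

	netdev = malloc(64);		/* stands in for alloc_etherdev() */
	if (!netdev)
		return -1;

	mdio = malloc(64);		/* stands in for mdiobus_alloc() */
	if (!mdio) {
		err = -1;
		goto err_netdev;	/* not "return": netdev must be freed */
	}

	/* ... more setup, each failure jumping to the matching label ... */

	free(mdio);			/* demo only: a real probe keeps these */
	free(netdev);
	return 0;

err_netdev:
	free(netdev);
	return err;
}

int main(void)
{
	return fake_probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}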
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 4f7dc044601e..529ef0594b90 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
| @@ -714,8 +714,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, | |||
| 714 | msecs_to_jiffies(timeout))) { | 714 | msecs_to_jiffies(timeout))) { |
| 715 | mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", | 715 | mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", |
| 716 | op); | 716 | op); |
| 717 | err = -EIO; | 717 | if (op == MLX4_CMD_NOP) { |
| 718 | goto out_reset; | 718 | err = -EBUSY; |
| 719 | goto out; | ||
| 720 | } else { | ||
| 721 | err = -EIO; | ||
| 722 | goto out_reset; | ||
| 723 | } | ||
| 719 | } | 724 | } |
| 720 | 725 | ||
| 721 | err = context->result; | 726 | err = context->result; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 3f44e2bbb982..a2ddf3d75ff8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
| @@ -1102,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) | |||
| 1102 | struct mlx4_en_priv *priv = netdev_priv(dev); | 1102 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 1103 | 1103 | ||
| 1104 | /* check if requested function is supported by the device */ | 1104 | /* check if requested function is supported by the device */ |
| 1105 | if ((hfunc == ETH_RSS_HASH_TOP && | 1105 | if (hfunc == ETH_RSS_HASH_TOP) { |
| 1106 | !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) || | 1106 | if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) |
| 1107 | (hfunc == ETH_RSS_HASH_XOR && | 1107 | return -EINVAL; |
| 1108 | !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))) | 1108 | if (!(dev->features & NETIF_F_RXHASH)) |
| 1109 | return -EINVAL; | 1109 | en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); |
| 1110 | return 0; | ||
| 1111 | } else if (hfunc == ETH_RSS_HASH_XOR) { | ||
| 1112 | if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)) | ||
| 1113 | return -EINVAL; | ||
| 1114 | if (dev->features & NETIF_F_RXHASH) | ||
| 1115 | en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); | ||
| 1116 | return 0; | ||
| 1117 | } | ||
| 1110 | 1118 | ||
| 1111 | priv->rss_hash_fn = hfunc; | 1119 | return -EINVAL; |
| 1112 | if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH)) | ||
| 1113 | en_warn(priv, | ||
| 1114 | "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); | ||
| 1115 | if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH)) | ||
| 1116 | en_warn(priv, | ||
| 1117 | "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); | ||
| 1118 | return 0; | ||
| 1119 | } | 1120 | } |
| 1120 | 1121 | ||
| 1121 | static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, | 1122 | static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, |
| @@ -1189,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, | |||
| 1189 | priv->prof->rss_rings = rss_rings; | 1190 | priv->prof->rss_rings = rss_rings; |
| 1190 | if (key) | 1191 | if (key) |
| 1191 | memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); | 1192 | memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); |
| 1193 | if (hfunc != ETH_RSS_HASH_NO_CHANGE) | ||
| 1194 | priv->rss_hash_fn = hfunc; | ||
| 1192 | 1195 | ||
| 1193 | if (port_up) { | 1196 | if (port_up) { |
| 1194 | err = mlx4_en_start_port(dev); | 1197 | err = mlx4_en_start_port(dev); |
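The en_ethtool.c rework above splits validation from commit: mlx4_en_check_rxfh_func() now only checks the requested hash function against the device capability flags (and warns about the RXHASH interaction), while mlx4_en_set_rxfh() stores the value, skipping ETH_RSS_HASH_NO_CHANGE. A compact user-space model of that validate-then-commit split, with made-up flag and function names:

#include <stdbool.h>
#include <stdio.h>

#define CAP_RSS_TOP	0x1	/* device supports the Toeplitz hash */
#define CAP_RSS_XOR	0x2	/* device supports the XOR hash */

enum hfunc { HASH_TOP, HASH_XOR, HASH_NO_CHANGE };

/* Pure check: reject anything the device cannot do, change no state. */
static bool hfunc_supported(unsigned int caps, enum hfunc f)
{
	switch (f) {
	case HASH_TOP:
		return caps & CAP_RSS_TOP;
	case HASH_XOR:
		return caps & CAP_RSS_XOR;
	default:
		return false;
	}
}

/* Commit: only overwrite the stored hash function when the caller actually
 * asked for a change, mirroring the ETH_RSS_HASH_NO_CHANGE test above. */
static int set_rxfh(unsigned int caps, enum hfunc *stored, enum hfunc requested)
{
	if (requested == HASH_NO_CHANGE)
		return 0;
	if (!hfunc_supported(caps, requested))
		return -1;
	*stored = requested;
	return 0;
}

int main(void)
{
	enum hfunc cur = HASH_TOP;

	printf("%d\n", set_rxfh(CAP_RSS_TOP, &cur, HASH_XOR));	     /* -1: unsupported */
	printf("%d\n", set_rxfh(CAP_RSS_TOP, &cur, HASH_NO_CHANGE)); /* 0: cur untouched */
	return 0;
}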
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0f1afc085d58..cf467a9f6cc7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work) | |||
| 1467 | if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) | 1467 | if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) |
| 1468 | mlx4_en_ptp_overflow_check(mdev); | 1468 | mlx4_en_ptp_overflow_check(mdev); |
| 1469 | 1469 | ||
| 1470 | mlx4_en_recover_from_oom(priv); | ||
| 1470 | queue_delayed_work(mdev->workqueue, &priv->service_task, | 1471 | queue_delayed_work(mdev->workqueue, &priv->service_task, |
| 1471 | SERVICE_TASK_DELAY); | 1472 | SERVICE_TASK_DELAY); |
| 1472 | } | 1473 | } |
| @@ -1500,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) | |||
| 1500 | { | 1501 | { |
| 1501 | struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; | 1502 | struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; |
| 1502 | int numa_node = priv->mdev->dev->numa_node; | 1503 | int numa_node = priv->mdev->dev->numa_node; |
| 1503 | int ret = 0; | ||
| 1504 | 1504 | ||
| 1505 | if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) | 1505 | if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) |
| 1506 | return -ENOMEM; | 1506 | return -ENOMEM; |
| 1507 | 1507 | ||
| 1508 | ret = cpumask_set_cpu_local_first(ring_idx, numa_node, | 1508 | cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), |
| 1509 | ring->affinity_mask); | 1509 | ring->affinity_mask); |
| 1510 | if (ret) | 1510 | return 0; |
| 1511 | free_cpumask_var(ring->affinity_mask); | ||
| 1512 | |||
| 1513 | return ret; | ||
| 1514 | } | 1511 | } |
| 1515 | 1512 | ||
| 1516 | static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) | 1513 | static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) |
| @@ -1721,7 +1718,7 @@ mac_err: | |||
| 1721 | cq_err: | 1718 | cq_err: |
| 1722 | while (rx_index--) { | 1719 | while (rx_index--) { |
| 1723 | mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); | 1720 | mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); |
| 1724 | mlx4_en_free_affinity_hint(priv, i); | 1721 | mlx4_en_free_affinity_hint(priv, rx_index); |
| 1725 | } | 1722 | } |
| 1726 | for (i = 0; i < priv->rx_ring_num; i++) | 1723 | for (i = 0; i < priv->rx_ring_num; i++) |
| 1727 | mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); | 1724 | mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); |
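The affinity-hint hunk above (and the en_tx.c hunk below for the XPS mask) replaces cpumask_set_cpu_local_first(), which could fail and needed unwinding, with cpumask_local_spread(), which simply returns a nearby CPU for the given NUMA node and cannot fail. A minimal kernel-style sketch of the new pattern, assuming only the cpumask helpers shown; the surrounding function name is illustrative:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Allocate an affinity mask for ring 'idx' and point it at one CPU chosen
 * near NUMA node 'node'.  cpumask_local_spread() always returns a valid CPU,
 * so the only failure left is the mask allocation itself -- which is why the
 * hunk above could drop its error unwinding. */
static int ring_init_affinity_hint(cpumask_var_t *mask, int idx, int node)
{
	if (!zalloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(idx, node), *mask);
	return 0;
}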
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 54f0e5ab2e55..0a56f010c846 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c | |||
| @@ -139,7 +139,7 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num) | |||
| 139 | int i; | 139 | int i; |
| 140 | int offset = next - start; | 140 | int offset = next - start; |
| 141 | 141 | ||
| 142 | for (i = 0; i <= num; i++) { | 142 | for (i = 0; i < num; i++) { |
| 143 | ret += be64_to_cpu(*curr); | 143 | ret += be64_to_cpu(*curr); |
| 144 | curr += offset; | 144 | curr += offset; |
| 145 | } | 145 | } |
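The one-character en_port.c change above fixes an off-by-one: the loop summed num + 1 counters, reading one stride past the last statistic. A tiny stand-alone illustration of the strided sum:

#include <stdio.h>

/* Sum 'num' counters laid out 'stride' entries apart, starting at 'start'.
 * With '<=' the old loop touched one extra counter past the end. */
static unsigned long sum_strided(const unsigned long *start, int stride, int num)
{
	unsigned long ret = 0;
	int i;

	for (i = 0; i < num; i++) {	/* was: i <= num */
		ret += *start;
		start += stride;
	}
	return ret;
}

int main(void)
{
	unsigned long stats[6] = { 1, 10, 2, 20, 3, 30 };

	/* three per-port counters interleaved with a stride of 2 */
	printf("%lu\n", sum_strided(stats, 2, 3));	/* 1 + 2 + 3 = 6 */
	return 0;
}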
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 4fdd3c37e47b..2a77a6b19121 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
| @@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, | |||
| 244 | return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); | 244 | return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); |
| 245 | } | 245 | } |
| 246 | 246 | ||
| 247 | static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) | ||
| 248 | { | ||
| 249 | BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size); | ||
| 250 | return ring->prod == ring->cons; | ||
| 251 | } | ||
| 252 | |||
| 247 | static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) | 253 | static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) |
| 248 | { | 254 | { |
| 249 | *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); | 255 | *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); |
| @@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, | |||
| 315 | ring->cons, ring->prod); | 321 | ring->cons, ring->prod); |
| 316 | 322 | ||
| 317 | /* Unmap and free Rx buffers */ | 323 | /* Unmap and free Rx buffers */ |
| 318 | BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); | 324 | while (!mlx4_en_is_ring_empty(ring)) { |
| 319 | while (ring->cons != ring->prod) { | ||
| 320 | index = ring->cons & ring->size_mask; | 325 | index = ring->cons & ring->size_mask; |
| 321 | en_dbg(DRV, priv, "Processing descriptor:%d\n", index); | 326 | en_dbg(DRV, priv, "Processing descriptor:%d\n", index); |
| 322 | mlx4_en_free_rx_desc(priv, ring, index); | 327 | mlx4_en_free_rx_desc(priv, ring, index); |
| @@ -491,6 +496,23 @@ err_allocator: | |||
| 491 | return err; | 496 | return err; |
| 492 | } | 497 | } |
| 493 | 498 | ||
| 499 | /* We recover from out of memory by scheduling our napi poll | ||
| 500 | * function (mlx4_en_process_cq), which tries to allocate | ||
| 501 | * all missing RX buffers (call to mlx4_en_refill_rx_buffers). | ||
| 502 | */ | ||
| 503 | void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) | ||
| 504 | { | ||
| 505 | int ring; | ||
| 506 | |||
| 507 | if (!priv->port_up) | ||
| 508 | return; | ||
| 509 | |||
| 510 | for (ring = 0; ring < priv->rx_ring_num; ring++) { | ||
| 511 | if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) | ||
| 512 | napi_reschedule(&priv->rx_cq[ring]->napi); | ||
| 513 | } | ||
| 514 | } | ||
| 515 | |||
| 494 | void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, | 516 | void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, |
| 495 | struct mlx4_en_rx_ring **pring, | 517 | struct mlx4_en_rx_ring **pring, |
| 496 | u32 size, u16 stride) | 518 | u32 size, u16 stride) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 1783705273d8..7bed3a88579f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
| @@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | |||
| 143 | ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; | 143 | ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; |
| 144 | ring->queue_index = queue_index; | 144 | ring->queue_index = queue_index; |
| 145 | 145 | ||
| 146 | if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index)) | 146 | if (queue_index < priv->num_tx_rings_p_up) |
| 147 | cpumask_set_cpu(queue_index, &ring->affinity_mask); | 147 | cpumask_set_cpu(cpumask_local_spread(queue_index, |
| 148 | priv->mdev->dev->numa_node), | ||
| 149 | &ring->affinity_mask); | ||
| 148 | 150 | ||
| 149 | *pring = ring; | 151 | *pring = ring; |
| 150 | return 0; | 152 | return 0; |
| @@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | |||
| 213 | 215 | ||
| 214 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, | 216 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, |
| 215 | &ring->qp, &ring->qp_state); | 217 | &ring->qp, &ring->qp_state); |
| 216 | if (!user_prio && cpu_online(ring->queue_index)) | 218 | if (!cpumask_empty(&ring->affinity_mask)) |
| 217 | netif_set_xps_queue(priv->dev, &ring->affinity_mask, | 219 | netif_set_xps_queue(priv->dev, &ring->affinity_mask, |
| 218 | ring->queue_index); | 220 | ring->queue_index); |
| 219 | 221 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index a4079811b176..e30bf57ad7a1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -56,11 +56,13 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); | |||
| 56 | #define MLX4_GET(dest, source, offset) \ | 56 | #define MLX4_GET(dest, source, offset) \ |
| 57 | do { \ | 57 | do { \ |
| 58 | void *__p = (char *) (source) + (offset); \ | 58 | void *__p = (char *) (source) + (offset); \ |
| 59 | u64 val; \ | ||
| 59 | switch (sizeof (dest)) { \ | 60 | switch (sizeof (dest)) { \ |
| 60 | case 1: (dest) = *(u8 *) __p; break; \ | 61 | case 1: (dest) = *(u8 *) __p; break; \ |
| 61 | case 2: (dest) = be16_to_cpup(__p); break; \ | 62 | case 2: (dest) = be16_to_cpup(__p); break; \ |
| 62 | case 4: (dest) = be32_to_cpup(__p); break; \ | 63 | case 4: (dest) = be32_to_cpup(__p); break; \ |
| 63 | case 8: (dest) = be64_to_cpup(__p); break; \ | 64 | case 8: val = get_unaligned((u64 *)__p); \ |
| 65 | (dest) = be64_to_cpu(val); break; \ | ||
| 64 | default: __buggy_use_of_MLX4_GET(); \ | 66 | default: __buggy_use_of_MLX4_GET(); \ |
| 65 | } \ | 67 | } \ |
| 66 | } while (0) | 68 | } while (0) |
| @@ -1605,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id) | |||
| 1605 | * swaps each 4-byte word before passing it back to | 1607 | * swaps each 4-byte word before passing it back to |
| 1606 | * us. Therefore we need to swab it before printing. | 1608 | * us. Therefore we need to swab it before printing. |
| 1607 | */ | 1609 | */ |
| 1608 | for (i = 0; i < 4; ++i) | 1610 | u32 *bid_u32 = (u32 *)board_id; |
| 1609 | ((u32 *) board_id)[i] = | 1611 | |
| 1610 | swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); | 1612 | for (i = 0; i < 4; ++i) { |
| 1613 | u32 *addr; | ||
| 1614 | u32 val; | ||
| 1615 | |||
| 1616 | addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); | ||
| 1617 | val = get_unaligned(addr); | ||
| 1618 | val = swab32(val); | ||
| 1619 | put_unaligned(val, &bid_u32[i]); | ||
| 1620 | } | ||
| 1611 | } | 1621 | } |
| 1612 | } | 1622 | } |
| 1613 | 1623 | ||
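Both fw.c hunks stop dereferencing possibly unaligned pointers directly: the 64-bit MLX4_GET case and the board-id byte swap now go through get_unaligned()/put_unaligned(), which matters on architectures that fault on unaligned loads. A user-space model of the same idea -- read a big-endian 64-bit field from an arbitrary offset without a typed dereference (the kernel's get_unaligned() + be64_to_cpu() pair achieves this more efficiently):

#include <stdint.h>
#include <stdio.h>

/* Assemble a big-endian u64 byte by byte: no alignment requirement and no
 * dependence on host endianness. */
static uint64_t get_be64_unaligned(const void *p)
{
	const unsigned char *b = p;
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return v;
}

int main(void)
{
	/* a one-byte header pushes the 64-bit field to an odd offset */
	unsigned char buf[9] = { 0x00,
		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };

	printf("0x%llx\n", (unsigned long long)get_be64_unaligned(buf + 1));
	return 0;
}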
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 9de30216b146..d021f079f181 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
| @@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | |||
| 774 | void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, | 774 | void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, |
| 775 | struct mlx4_en_tx_ring *ring); | 775 | struct mlx4_en_tx_ring *ring); |
| 776 | void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); | 776 | void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); |
| 777 | void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); | ||
| 777 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | 778 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, |
| 778 | struct mlx4_en_rx_ring **pring, | 779 | struct mlx4_en_rx_ring **pring, |
| 779 | u32 size, u16 stride, int node); | 780 | u32 size, u16 stride, int node); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index c7f28bf4b8e2..bafe2180cf0c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -2845,7 +2845,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, | |||
| 2845 | { | 2845 | { |
| 2846 | int err; | 2846 | int err; |
| 2847 | int eqn = vhcr->in_modifier; | 2847 | int eqn = vhcr->in_modifier; |
| 2848 | int res_id = (slave << 8) | eqn; | 2848 | int res_id = (slave << 10) | eqn; |
| 2849 | struct mlx4_eq_context *eqc = inbox->buf; | 2849 | struct mlx4_eq_context *eqc = inbox->buf; |
| 2850 | int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; | 2850 | int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; |
| 2851 | int mtt_size = eq_get_mtt_size(eqc); | 2851 | int mtt_size = eq_get_mtt_size(eqc); |
| @@ -3051,7 +3051,7 @@ int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, | |||
| 3051 | struct mlx4_cmd_info *cmd) | 3051 | struct mlx4_cmd_info *cmd) |
| 3052 | { | 3052 | { |
| 3053 | int eqn = vhcr->in_modifier; | 3053 | int eqn = vhcr->in_modifier; |
| 3054 | int res_id = eqn | (slave << 8); | 3054 | int res_id = eqn | (slave << 10); |
| 3055 | struct res_eq *eq; | 3055 | struct res_eq *eq; |
| 3056 | int err; | 3056 | int err; |
| 3057 | 3057 | ||
| @@ -3108,7 +3108,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) | |||
| 3108 | return 0; | 3108 | return 0; |
| 3109 | 3109 | ||
| 3110 | mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); | 3110 | mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); |
| 3111 | res_id = (slave << 8) | event_eq->eqn; | 3111 | res_id = (slave << 10) | event_eq->eqn; |
| 3112 | err = get_res(dev, slave, res_id, RES_EQ, &req); | 3112 | err = get_res(dev, slave, res_id, RES_EQ, &req); |
| 3113 | if (err) | 3113 | if (err) |
| 3114 | goto unlock; | 3114 | goto unlock; |
| @@ -3131,7 +3131,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) | |||
| 3131 | 3131 | ||
| 3132 | memcpy(mailbox->buf, (u8 *) eqe, 28); | 3132 | memcpy(mailbox->buf, (u8 *) eqe, 28); |
| 3133 | 3133 | ||
| 3134 | in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16); | 3134 | in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16); |
| 3135 | 3135 | ||
| 3136 | err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, | 3136 | err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, |
| 3137 | MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, | 3137 | MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, |
| @@ -3157,7 +3157,7 @@ int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, | |||
| 3157 | struct mlx4_cmd_info *cmd) | 3157 | struct mlx4_cmd_info *cmd) |
| 3158 | { | 3158 | { |
| 3159 | int eqn = vhcr->in_modifier; | 3159 | int eqn = vhcr->in_modifier; |
| 3160 | int res_id = eqn | (slave << 8); | 3160 | int res_id = eqn | (slave << 10); |
| 3161 | struct res_eq *eq; | 3161 | struct res_eq *eq; |
| 3162 | int err; | 3162 | int err; |
| 3163 | 3163 | ||
| @@ -3187,7 +3187,7 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, | |||
| 3187 | int cqn = vhcr->in_modifier; | 3187 | int cqn = vhcr->in_modifier; |
| 3188 | struct mlx4_cq_context *cqc = inbox->buf; | 3188 | struct mlx4_cq_context *cqc = inbox->buf; |
| 3189 | int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; | 3189 | int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; |
| 3190 | struct res_cq *cq; | 3190 | struct res_cq *cq = NULL; |
| 3191 | struct res_mtt *mtt; | 3191 | struct res_mtt *mtt; |
| 3192 | 3192 | ||
| 3193 | err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); | 3193 | err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); |
| @@ -3223,7 +3223,7 @@ int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, | |||
| 3223 | { | 3223 | { |
| 3224 | int err; | 3224 | int err; |
| 3225 | int cqn = vhcr->in_modifier; | 3225 | int cqn = vhcr->in_modifier; |
| 3226 | struct res_cq *cq; | 3226 | struct res_cq *cq = NULL; |
| 3227 | 3227 | ||
| 3228 | err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); | 3228 | err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); |
| 3229 | if (err) | 3229 | if (err) |
| @@ -3362,7 +3362,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, | |||
| 3362 | int err; | 3362 | int err; |
| 3363 | int srqn = vhcr->in_modifier; | 3363 | int srqn = vhcr->in_modifier; |
| 3364 | struct res_mtt *mtt; | 3364 | struct res_mtt *mtt; |
| 3365 | struct res_srq *srq; | 3365 | struct res_srq *srq = NULL; |
| 3366 | struct mlx4_srq_context *srqc = inbox->buf; | 3366 | struct mlx4_srq_context *srqc = inbox->buf; |
| 3367 | int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; | 3367 | int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; |
| 3368 | 3368 | ||
| @@ -3406,7 +3406,7 @@ int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, | |||
| 3406 | { | 3406 | { |
| 3407 | int err; | 3407 | int err; |
| 3408 | int srqn = vhcr->in_modifier; | 3408 | int srqn = vhcr->in_modifier; |
| 3409 | struct res_srq *srq; | 3409 | struct res_srq *srq = NULL; |
| 3410 | 3410 | ||
| 3411 | err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); | 3411 | err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); |
| 3412 | if (err) | 3412 | if (err) |
| @@ -4714,13 +4714,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) | |||
| 4714 | break; | 4714 | break; |
| 4715 | 4715 | ||
| 4716 | case RES_EQ_HW: | 4716 | case RES_EQ_HW: |
| 4717 | err = mlx4_cmd(dev, slave, eqn & 0xff, | 4717 | err = mlx4_cmd(dev, slave, eqn & 0x3ff, |
| 4718 | 1, MLX4_CMD_HW2SW_EQ, | 4718 | 1, MLX4_CMD_HW2SW_EQ, |
| 4719 | MLX4_CMD_TIME_CLASS_A, | 4719 | MLX4_CMD_TIME_CLASS_A, |
| 4720 | MLX4_CMD_NATIVE); | 4720 | MLX4_CMD_NATIVE); |
| 4721 | if (err) | 4721 | if (err) |
| 4722 | mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", | 4722 | mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", |
| 4723 | slave, eqn); | 4723 | slave, eqn & 0x3ff); |
| 4724 | atomic_dec(&eq->mtt->ref_count); | 4724 | atomic_dec(&eq->mtt->ref_count); |
| 4725 | state = RES_EQ_RESERVED; | 4725 | state = RES_EQ_RESERVED; |
| 4726 | break; | 4726 | break; |
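The resource_tracker.c changes all stem from one encoding: EQ resources are keyed by a composite id built from the slave number and the EQ number. Newer devices expose more than 256 EQs, so an 8-bit EQ field let the EQ number bleed into the slave bits; every place that builds, unpacks, or masks the id is widened to 10 bits (likewise the 0x3ff mask in the GEN_EQE in_modifier). A tiny model with illustrative names:

#include <stdio.h>

#define EQN_BITS	10			/* was 8 */
#define EQN_MASK	((1 << EQN_BITS) - 1)

static int make_res_id(int slave, int eqn)
{
	return (slave << EQN_BITS) | (eqn & EQN_MASK);
}

static void unpack_res_id(int res_id, int *slave, int *eqn)
{
	*slave = res_id >> EQN_BITS;
	*eqn = res_id & EQN_MASK;
}

int main(void)
{
	int slave, eqn;

	/* with an 8-bit shift, (slave 0, eqn 300) and (slave 1, eqn 44) both
	 * collapsed to id 300; ten bits keep them distinct */
	unpack_res_id(make_res_id(1, 300), &slave, &eqn);
	printf("slave %d eqn %d\n", slave, eqn);	/* slave 1 eqn 300 */
	return 0;
}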
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 1412f5af05ec..2bae50292dcd 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c | |||
| @@ -69,11 +69,7 @@ | |||
| 69 | #include <net/ip.h> | 69 | #include <net/ip.h> |
| 70 | #include <net/tcp.h> | 70 | #include <net/tcp.h> |
| 71 | #include <asm/byteorder.h> | 71 | #include <asm/byteorder.h> |
| 72 | #include <asm/io.h> | ||
| 73 | #include <asm/processor.h> | 72 | #include <asm/processor.h> |
| 74 | #ifdef CONFIG_MTRR | ||
| 75 | #include <asm/mtrr.h> | ||
| 76 | #endif | ||
| 77 | #include <net/busy_poll.h> | 73 | #include <net/busy_poll.h> |
| 78 | 74 | ||
| 79 | #include "myri10ge_mcp.h" | 75 | #include "myri10ge_mcp.h" |
| @@ -242,8 +238,7 @@ struct myri10ge_priv { | |||
| 242 | unsigned int rdma_tags_available; | 238 | unsigned int rdma_tags_available; |
| 243 | int intr_coal_delay; | 239 | int intr_coal_delay; |
| 244 | __be32 __iomem *intr_coal_delay_ptr; | 240 | __be32 __iomem *intr_coal_delay_ptr; |
| 245 | int mtrr; | 241 | int wc_cookie; |
| 246 | int wc_enabled; | ||
| 247 | int down_cnt; | 242 | int down_cnt; |
| 248 | wait_queue_head_t down_wq; | 243 | wait_queue_head_t down_wq; |
| 249 | struct work_struct watchdog_work; | 244 | struct work_struct watchdog_work; |
| @@ -1905,7 +1900,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { | |||
| 1905 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", | 1900 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", |
| 1906 | "tx_heartbeat_errors", "tx_window_errors", | 1901 | "tx_heartbeat_errors", "tx_window_errors", |
| 1907 | /* device-specific stats */ | 1902 | /* device-specific stats */ |
| 1908 | "tx_boundary", "WC", "irq", "MSI", "MSIX", | 1903 | "tx_boundary", "irq", "MSI", "MSIX", |
| 1909 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", | 1904 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", |
| 1910 | "serial_number", "watchdog_resets", | 1905 | "serial_number", "watchdog_resets", |
| 1911 | #ifdef CONFIG_MYRI10GE_DCA | 1906 | #ifdef CONFIG_MYRI10GE_DCA |
| @@ -1984,7 +1979,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, | |||
| 1984 | data[i] = ((u64 *)&link_stats)[i]; | 1979 | data[i] = ((u64 *)&link_stats)[i]; |
| 1985 | 1980 | ||
| 1986 | data[i++] = (unsigned int)mgp->tx_boundary; | 1981 | data[i++] = (unsigned int)mgp->tx_boundary; |
| 1987 | data[i++] = (unsigned int)mgp->wc_enabled; | ||
| 1988 | data[i++] = (unsigned int)mgp->pdev->irq; | 1982 | data[i++] = (unsigned int)mgp->pdev->irq; |
| 1989 | data[i++] = (unsigned int)mgp->msi_enabled; | 1983 | data[i++] = (unsigned int)mgp->msi_enabled; |
| 1990 | data[i++] = (unsigned int)mgp->msix_enabled; | 1984 | data[i++] = (unsigned int)mgp->msix_enabled; |
| @@ -4040,14 +4034,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 4040 | 4034 | ||
| 4041 | mgp->board_span = pci_resource_len(pdev, 0); | 4035 | mgp->board_span = pci_resource_len(pdev, 0); |
| 4042 | mgp->iomem_base = pci_resource_start(pdev, 0); | 4036 | mgp->iomem_base = pci_resource_start(pdev, 0); |
| 4043 | mgp->mtrr = -1; | 4037 | mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span); |
| 4044 | mgp->wc_enabled = 0; | ||
| 4045 | #ifdef CONFIG_MTRR | ||
| 4046 | mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span, | ||
| 4047 | MTRR_TYPE_WRCOMB, 1); | ||
| 4048 | if (mgp->mtrr >= 0) | ||
| 4049 | mgp->wc_enabled = 1; | ||
| 4050 | #endif | ||
| 4051 | mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); | 4038 | mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); |
| 4052 | if (mgp->sram == NULL) { | 4039 | if (mgp->sram == NULL) { |
| 4053 | dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", | 4040 | dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", |
| @@ -4146,14 +4133,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 4146 | goto abort_with_state; | 4133 | goto abort_with_state; |
| 4147 | } | 4134 | } |
| 4148 | if (mgp->msix_enabled) | 4135 | if (mgp->msix_enabled) |
| 4149 | dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n", | 4136 | dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", |
| 4150 | mgp->num_slices, mgp->tx_boundary, mgp->fw_name, | 4137 | mgp->num_slices, mgp->tx_boundary, mgp->fw_name, |
| 4151 | (mgp->wc_enabled ? "Enabled" : "Disabled")); | 4138 | (mgp->wc_cookie > 0 ? "Enabled" : "Disabled")); |
| 4152 | else | 4139 | else |
| 4153 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", | 4140 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", |
| 4154 | mgp->msi_enabled ? "MSI" : "xPIC", | 4141 | mgp->msi_enabled ? "MSI" : "xPIC", |
| 4155 | pdev->irq, mgp->tx_boundary, mgp->fw_name, | 4142 | pdev->irq, mgp->tx_boundary, mgp->fw_name, |
| 4156 | (mgp->wc_enabled ? "Enabled" : "Disabled")); | 4143 | (mgp->wc_cookie > 0 ? "Enabled" : "Disabled")); |
| 4157 | 4144 | ||
| 4158 | board_number++; | 4145 | board_number++; |
| 4159 | return 0; | 4146 | return 0; |
| @@ -4175,10 +4162,7 @@ abort_with_ioremap: | |||
| 4175 | iounmap(mgp->sram); | 4162 | iounmap(mgp->sram); |
| 4176 | 4163 | ||
| 4177 | abort_with_mtrr: | 4164 | abort_with_mtrr: |
| 4178 | #ifdef CONFIG_MTRR | 4165 | arch_phys_wc_del(mgp->wc_cookie); |
| 4179 | if (mgp->mtrr >= 0) | ||
| 4180 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | ||
| 4181 | #endif | ||
| 4182 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), | 4166 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
| 4183 | mgp->cmd, mgp->cmd_bus); | 4167 | mgp->cmd, mgp->cmd_bus); |
| 4184 | 4168 | ||
| @@ -4220,11 +4204,7 @@ static void myri10ge_remove(struct pci_dev *pdev) | |||
| 4220 | pci_restore_state(pdev); | 4204 | pci_restore_state(pdev); |
| 4221 | 4205 | ||
| 4222 | iounmap(mgp->sram); | 4206 | iounmap(mgp->sram); |
| 4223 | 4207 | arch_phys_wc_del(mgp->wc_cookie); | |
| 4224 | #ifdef CONFIG_MTRR | ||
| 4225 | if (mgp->mtrr >= 0) | ||
| 4226 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | ||
| 4227 | #endif | ||
| 4228 | myri10ge_free_slices(mgp); | 4208 | myri10ge_free_slices(mgp); |
| 4229 | kfree(mgp->msix_vectors); | 4209 | kfree(mgp->msix_vectors); |
| 4230 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), | 4210 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
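The myri10ge changes drop the driver's open-coded CONFIG_MTRR handling in favour of arch_phys_wc_add(), which hands back an opaque cookie (non-positive when no MTRR was needed or available, e.g. with PAT) and is undone with arch_phys_wc_del(). A kernel-style sketch of the pattern under those assumptions; the struct and function names are illustrative:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

struct wc_bar {
	void __iomem *regs;
	int wc_cookie;
};

/* Map a BAR write-combined: request a WC MTRR where the platform still
 * needs one, then ioremap_wc() the region. */
static int wc_bar_map(struct wc_bar *bar, resource_size_t base,
		      unsigned long len)
{
	bar->wc_cookie = arch_phys_wc_add(base, len);
	bar->regs = ioremap_wc(base, len);
	if (!bar->regs) {
		arch_phys_wc_del(bar->wc_cookie);
		return -ENOMEM;
	}
	return 0;
}

static void wc_bar_unmap(struct wc_bar *bar)
{
	iounmap(bar->regs);
	arch_phys_wc_del(bar->wc_cookie);	/* no-op for non-MTRR cookies */
}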
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 5c4068353f66..7b43a3b4abdc 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | |||
| @@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
| 135 | int i, j; | 135 | int i, j; |
| 136 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; | 136 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; |
| 137 | 137 | ||
| 138 | spin_lock(&adapter->tx_clean_lock); | 138 | spin_lock_bh(&adapter->tx_clean_lock); |
| 139 | cmd_buf = tx_ring->cmd_buf_arr; | 139 | cmd_buf = tx_ring->cmd_buf_arr; |
| 140 | for (i = 0; i < tx_ring->num_desc; i++) { | 140 | for (i = 0; i < tx_ring->num_desc; i++) { |
| 141 | buffrag = cmd_buf->frag_array; | 141 | buffrag = cmd_buf->frag_array; |
| @@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
| 159 | } | 159 | } |
| 160 | cmd_buf++; | 160 | cmd_buf++; |
| 161 | } | 161 | } |
| 162 | spin_unlock(&adapter->tx_clean_lock); | 162 | spin_unlock_bh(&adapter->tx_clean_lock); |
| 163 | } | 163 | } |
| 164 | 164 | ||
| 165 | void netxen_free_sw_resources(struct netxen_adapter *adapter) | 165 | void netxen_free_sw_resources(struct netxen_adapter *adapter) |
| @@ -1764,7 +1764,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) | |||
| 1764 | int done = 0; | 1764 | int done = 0; |
| 1765 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; | 1765 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; |
| 1766 | 1766 | ||
| 1767 | if (!spin_trylock(&adapter->tx_clean_lock)) | 1767 | if (!spin_trylock_bh(&adapter->tx_clean_lock)) |
| 1768 | return 1; | 1768 | return 1; |
| 1769 | 1769 | ||
| 1770 | sw_consumer = tx_ring->sw_consumer; | 1770 | sw_consumer = tx_ring->sw_consumer; |
| @@ -1819,7 +1819,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) | |||
| 1819 | */ | 1819 | */ |
| 1820 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); | 1820 | hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); |
| 1821 | done = (sw_consumer == hw_consumer); | 1821 | done = (sw_consumer == hw_consumer); |
| 1822 | spin_unlock(&adapter->tx_clean_lock); | 1822 | spin_unlock_bh(&adapter->tx_clean_lock); |
| 1823 | 1823 | ||
| 1824 | return done; | 1824 | return done; |
| 1825 | } | 1825 | } |
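The netxen lock conversion follows from where the lock is used: tx_clean_lock is taken both from process context (releasing TX buffers) and from the BH-driven completion path, so the process-context side has to disable bottom halves while holding it or the softirq can deadlock on the same CPU. A minimal sketch of that rule with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(tx_clean_lock);

/* Process context: block softirqs for as long as the lock is held, because
 * the TX completion path below can run from BH on the same CPU. */
static void release_tx_buffers(void)
{
	spin_lock_bh(&tx_clean_lock);
	/* ... walk and free the pending TX buffers ... */
	spin_unlock_bh(&tx_clean_lock);
}

/* BH context (NAPI poll): trylock so the poll loop never spins on the
 * process-context holder; the _bh variant keeps the nesting consistent. */
static int process_tx_completions(void)
{
	if (!spin_trylock_bh(&tx_clean_lock))
		return 1;	/* come back on the next poll */
	/* ... reclaim completed descriptors ... */
	spin_unlock_bh(&tx_clean_lock);
	return 0;
}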
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index e0c31e3947d1..6409a06bbdf6 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | |||
| @@ -3025,9 +3025,9 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, | |||
| 3025 | u8 dw, rows, cols, banks, ranks; | 3025 | u8 dw, rows, cols, banks, ranks; |
| 3026 | u32 val; | 3026 | u32 val; |
| 3027 | 3027 | ||
| 3028 | if (size != sizeof(struct netxen_dimm_cfg)) { | 3028 | if (size < attr->size) { |
| 3029 | netdev_err(netdev, "Invalid size\n"); | 3029 | netdev_err(netdev, "Invalid size\n"); |
| 3030 | return -1; | 3030 | return -EINVAL; |
| 3031 | } | 3031 | } |
| 3032 | 3032 | ||
| 3033 | memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); | 3033 | memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); |
| @@ -3137,7 +3137,7 @@ out: | |||
| 3137 | 3137 | ||
| 3138 | static struct bin_attribute bin_attr_dimm = { | 3138 | static struct bin_attribute bin_attr_dimm = { |
| 3139 | .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, | 3139 | .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, |
| 3140 | .size = 0, | 3140 | .size = sizeof(struct netxen_dimm_cfg), |
| 3141 | .read = netxen_sysfs_read_dimm, | 3141 | .read = netxen_sysfs_read_dimm, |
| 3142 | }; | 3142 | }; |
| 3143 | 3143 | ||
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index f66641d961e3..6af028d5f9bc 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c | |||
| @@ -912,6 +912,8 @@ qca_spi_probe(struct spi_device *spi_device) | |||
| 912 | qca->spi_dev = spi_device; | 912 | qca->spi_dev = spi_device; |
| 913 | qca->legacy_mode = legacy_mode; | 913 | qca->legacy_mode = legacy_mode; |
| 914 | 914 | ||
| 915 | spi_set_drvdata(spi_device, qcaspi_devs); | ||
| 916 | |||
| 915 | mac = of_get_mac_address(spi_device->dev.of_node); | 917 | mac = of_get_mac_address(spi_device->dev.of_node); |
| 916 | 918 | ||
| 917 | if (mac) | 919 | if (mac) |
| @@ -944,8 +946,6 @@ qca_spi_probe(struct spi_device *spi_device) | |||
| 944 | return -EFAULT; | 946 | return -EFAULT; |
| 945 | } | 947 | } |
| 946 | 948 | ||
| 947 | spi_set_drvdata(spi_device, qcaspi_devs); | ||
| 948 | |||
| 949 | qcaspi_init_device_debugfs(qca); | 949 | qcaspi_init_device_debugfs(qca); |
| 950 | 950 | ||
| 951 | return 0; | 951 | return 0; |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index c70ab40d8698..3df51faf18ae 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -6884,7 +6884,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp, | |||
| 6884 | rtl8169_start_xmit(nskb, tp->dev); | 6884 | rtl8169_start_xmit(nskb, tp->dev); |
| 6885 | } while (segs); | 6885 | } while (segs); |
| 6886 | 6886 | ||
| 6887 | dev_kfree_skb(skb); | 6887 | dev_consume_skb_any(skb); |
| 6888 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { | 6888 | } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 6889 | if (skb_checksum_help(skb) < 0) | 6889 | if (skb_checksum_help(skb) < 0) |
| 6890 | goto drop; | 6890 | goto drop; |
| @@ -6896,7 +6896,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp, | |||
| 6896 | drop: | 6896 | drop: |
| 6897 | stats = &tp->dev->stats; | 6897 | stats = &tp->dev->stats; |
| 6898 | stats->tx_dropped++; | 6898 | stats->tx_dropped++; |
| 6899 | dev_kfree_skb(skb); | 6899 | dev_kfree_skb_any(skb); |
| 6900 | } | 6900 | } |
| 6901 | } | 6901 | } |
| 6902 | 6902 | ||
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index a570a60533be..cf98cc9bbc8d 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c | |||
| @@ -2921,10 +2921,11 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port, | |||
| 2921 | struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); | 2921 | struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); |
| 2922 | int err = 0; | 2922 | int err = 0; |
| 2923 | 2923 | ||
| 2924 | if (!n) | 2924 | if (!n) { |
| 2925 | n = neigh_create(&arp_tbl, &ip_addr, dev); | 2925 | n = neigh_create(&arp_tbl, &ip_addr, dev); |
| 2926 | if (!n) | 2926 | if (IS_ERR(n)) |
| 2927 | return -ENOMEM; | 2927 | return IS_ERR(n); |
| 2928 | } | ||
| 2928 | 2929 | ||
| 2929 | /* If the neigh is already resolved, then go ahead and | 2930 | /* If the neigh is already resolved, then go ahead and |
| 2930 | * install the entry, otherwise start the ARP process to | 2931 | * install the entry, otherwise start the ARP process to |
| @@ -2936,6 +2937,7 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port, | |||
| 2936 | else | 2937 | else |
| 2937 | neigh_event_send(n, NULL); | 2938 | neigh_event_send(n, NULL); |
| 2938 | 2939 | ||
| 2940 | neigh_release(n); | ||
| 2939 | return err; | 2941 | return err; |
| 2940 | } | 2942 | } |
| 2941 | 2943 | ||
| @@ -4176,14 +4178,15 @@ static int rocker_port_bridge_setlink(struct net_device *dev, | |||
| 4176 | 4178 | ||
| 4177 | static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 4179 | static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
| 4178 | struct net_device *dev, | 4180 | struct net_device *dev, |
| 4179 | u32 filter_mask) | 4181 | u32 filter_mask, int nlflags) |
| 4180 | { | 4182 | { |
| 4181 | struct rocker_port *rocker_port = netdev_priv(dev); | 4183 | struct rocker_port *rocker_port = netdev_priv(dev); |
| 4182 | u16 mode = BRIDGE_MODE_UNDEF; | 4184 | u16 mode = BRIDGE_MODE_UNDEF; |
| 4183 | u32 mask = BR_LEARNING | BR_LEARNING_SYNC; | 4185 | u32 mask = BR_LEARNING | BR_LEARNING_SYNC; |
| 4184 | 4186 | ||
| 4185 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, | 4187 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, |
| 4186 | rocker_port->brport_flags, mask); | 4188 | rocker_port->brport_flags, mask, |
| 4189 | nlflags); | ||
| 4187 | } | 4190 | } |
| 4188 | 4191 | ||
| 4189 | static int rocker_port_get_phys_port_name(struct net_device *dev, | 4192 | static int rocker_port_get_phys_port_name(struct net_device *dev, |
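In the rocker hunk above, neigh_create() reports failure with ERR_PTR() and never returns NULL, so the old NULL check could let an error pointer through; the fix also adds the neigh_release() required by the reference taken from __ipv4_neigh_lookup()/neigh_create(). One hedged aside: returning IS_ERR(n) propagates 1 rather than the underlying errno, and the more conventional spelling would be PTR_ERR(), roughly as in this condensed sketch (the editor's variant, not the merged code):

#include <linux/err.h>
#include <net/arp.h>
#include <net/neighbour.h>

/* Resolve an IPv4 next hop, creating the neighbour entry on demand.
 * Both the lookup and the create return a referenced entry that must be
 * released once we are done with it. */
static int resolve_ipv4_neigh(struct net_device *dev, __be32 ip_addr)
{
	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);

	if (!n) {
		n = neigh_create(&arp_tbl, &ip_addr, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);	/* the real errno, not 1 */
	}

	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);	/* kick off ARP resolution */

	neigh_release(n);
	return 0;
}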
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index c0ad95d2f63d..809ea4610a77 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
| @@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, | |||
| 224 | } | 224 | } |
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) | 227 | static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, |
| 228 | struct efx_rx_buffer *rx_buf, | ||
| 229 | unsigned int num_bufs) | ||
| 228 | { | 230 | { |
| 229 | if (rx_buf->page) { | 231 | do { |
| 230 | put_page(rx_buf->page); | 232 | if (rx_buf->page) { |
| 231 | rx_buf->page = NULL; | 233 | put_page(rx_buf->page); |
| 232 | } | 234 | rx_buf->page = NULL; |
| 235 | } | ||
| 236 | rx_buf = efx_rx_buf_next(rx_queue, rx_buf); | ||
| 237 | } while (--num_bufs); | ||
| 233 | } | 238 | } |
| 234 | 239 | ||
| 235 | /* Attempt to recycle the page if there is an RX recycle ring; the page can | 240 | /* Attempt to recycle the page if there is an RX recycle ring; the page can |
| @@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, | |||
| 278 | /* If this is the last buffer in a page, unmap and free it. */ | 283 | /* If this is the last buffer in a page, unmap and free it. */ |
| 279 | if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { | 284 | if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { |
| 280 | efx_unmap_rx_buffer(rx_queue->efx, rx_buf); | 285 | efx_unmap_rx_buffer(rx_queue->efx, rx_buf); |
| 281 | efx_free_rx_buffer(rx_buf); | 286 | efx_free_rx_buffers(rx_queue, rx_buf, 1); |
| 282 | } | 287 | } |
| 283 | rx_buf->page = NULL; | 288 | rx_buf->page = NULL; |
| 284 | } | 289 | } |
| @@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel, | |||
| 304 | 309 | ||
| 305 | efx_recycle_rx_pages(channel, rx_buf, n_frags); | 310 | efx_recycle_rx_pages(channel, rx_buf, n_frags); |
| 306 | 311 | ||
| 307 | do { | 312 | efx_free_rx_buffers(rx_queue, rx_buf, n_frags); |
| 308 | efx_free_rx_buffer(rx_buf); | ||
| 309 | rx_buf = efx_rx_buf_next(rx_queue, rx_buf); | ||
| 310 | } while (--n_frags); | ||
| 311 | } | 313 | } |
| 312 | 314 | ||
| 313 | /** | 315 | /** |
| @@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, | |||
| 431 | 433 | ||
| 432 | skb = napi_get_frags(napi); | 434 | skb = napi_get_frags(napi); |
| 433 | if (unlikely(!skb)) { | 435 | if (unlikely(!skb)) { |
| 434 | while (n_frags--) { | 436 | struct efx_rx_queue *rx_queue; |
| 435 | put_page(rx_buf->page); | 437 | |
| 436 | rx_buf->page = NULL; | 438 | rx_queue = efx_channel_get_rx_queue(channel); |
| 437 | rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); | 439 | efx_free_rx_buffers(rx_queue, rx_buf, n_frags); |
| 438 | } | ||
| 439 | return; | 440 | return; |
| 440 | } | 441 | } |
| 441 | 442 | ||
| @@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, | |||
| 622 | 623 | ||
| 623 | skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); | 624 | skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); |
| 624 | if (unlikely(skb == NULL)) { | 625 | if (unlikely(skb == NULL)) { |
| 625 | efx_free_rx_buffer(rx_buf); | 626 | struct efx_rx_queue *rx_queue; |
| 627 | |||
| 628 | rx_queue = efx_channel_get_rx_queue(channel); | ||
| 629 | efx_free_rx_buffers(rx_queue, rx_buf, n_frags); | ||
| 626 | return; | 630 | return; |
| 627 | } | 631 | } |
| 628 | skb_record_rx_queue(skb, channel->rx_queue.core_index); | 632 | skb_record_rx_queue(skb, channel->rx_queue.core_index); |
| @@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel) | |||
| 661 | * loopback layer, and free the rx_buf here | 665 | * loopback layer, and free the rx_buf here |
| 662 | */ | 666 | */ |
| 663 | if (unlikely(efx->loopback_selftest)) { | 667 | if (unlikely(efx->loopback_selftest)) { |
| 668 | struct efx_rx_queue *rx_queue; | ||
| 669 | |||
| 664 | efx_loopback_rx_packet(efx, eh, rx_buf->len); | 670 | efx_loopback_rx_packet(efx, eh, rx_buf->len); |
| 665 | efx_free_rx_buffer(rx_buf); | 671 | rx_queue = efx_channel_get_rx_queue(channel); |
| 672 | efx_free_rx_buffers(rx_queue, rx_buf, | ||
| 673 | channel->rx_pkt_n_frags); | ||
| 666 | goto out; | 674 | goto out; |
| 667 | } | 675 | } |
| 668 | 676 | ||
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 14b363a25c02..630f0b7800e4 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c | |||
| @@ -2238,9 +2238,10 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
| 2238 | const struct of_device_id *match = NULL; | 2238 | const struct of_device_id *match = NULL; |
| 2239 | struct smc_local *lp; | 2239 | struct smc_local *lp; |
| 2240 | struct net_device *ndev; | 2240 | struct net_device *ndev; |
| 2241 | struct resource *res, *ires; | 2241 | struct resource *res; |
| 2242 | unsigned int __iomem *addr; | 2242 | unsigned int __iomem *addr; |
| 2243 | unsigned long irq_flags = SMC_IRQ_FLAGS; | 2243 | unsigned long irq_flags = SMC_IRQ_FLAGS; |
| 2244 | unsigned long irq_resflags; | ||
| 2244 | int ret; | 2245 | int ret; |
| 2245 | 2246 | ||
| 2246 | ndev = alloc_etherdev(sizeof(struct smc_local)); | 2247 | ndev = alloc_etherdev(sizeof(struct smc_local)); |
| @@ -2332,16 +2333,19 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
| 2332 | goto out_free_netdev; | 2333 | goto out_free_netdev; |
| 2333 | } | 2334 | } |
| 2334 | 2335 | ||
| 2335 | ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 2336 | ndev->irq = platform_get_irq(pdev, 0); |
| 2336 | if (!ires) { | 2337 | if (ndev->irq <= 0) { |
| 2337 | ret = -ENODEV; | 2338 | ret = -ENODEV; |
| 2338 | goto out_release_io; | 2339 | goto out_release_io; |
| 2339 | } | 2340 | } |
| 2340 | 2341 | /* | |
| 2341 | ndev->irq = ires->start; | 2342 | * If this platform does not specify any special irqflags, or if |
| 2342 | 2343 | * the resource supplies a trigger, override the irqflags with | |
| 2343 | if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) | 2344 | * the trigger flags from the resource. |
| 2344 | irq_flags = ires->flags & IRQF_TRIGGER_MASK; | 2345 | */ |
| 2346 | irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); | ||
| 2347 | if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) | ||
| 2348 | irq_flags = irq_resflags & IRQF_TRIGGER_MASK; | ||
| 2345 | 2349 | ||
| 2346 | ret = smc_request_attrib(pdev, ndev); | 2350 | ret = smc_request_attrib(pdev, ndev); |
| 2347 | if (ret) | 2351 | if (ret) |
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 41047c9143d0..959aeeade0c9 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c | |||
| @@ -2418,9 +2418,9 @@ static int smsc911x_drv_probe(struct platform_device *pdev) | |||
| 2418 | struct net_device *dev; | 2418 | struct net_device *dev; |
| 2419 | struct smsc911x_data *pdata; | 2419 | struct smsc911x_data *pdata; |
| 2420 | struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); | 2420 | struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); |
| 2421 | struct resource *res, *irq_res; | 2421 | struct resource *res; |
| 2422 | unsigned int intcfg = 0; | 2422 | unsigned int intcfg = 0; |
| 2423 | int res_size, irq_flags; | 2423 | int res_size, irq, irq_flags; |
| 2424 | int retval; | 2424 | int retval; |
| 2425 | 2425 | ||
| 2426 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 2426 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
| @@ -2434,8 +2434,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev) | |||
| 2434 | } | 2434 | } |
| 2435 | res_size = resource_size(res); | 2435 | res_size = resource_size(res); |
| 2436 | 2436 | ||
| 2437 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 2437 | irq = platform_get_irq(pdev, 0); |
| 2438 | if (!irq_res) { | 2438 | if (irq <= 0) { |
| 2439 | pr_warn("Could not allocate irq resource\n"); | 2439 | pr_warn("Could not allocate irq resource\n"); |
| 2440 | retval = -ENODEV; | 2440 | retval = -ENODEV; |
| 2441 | goto out_0; | 2441 | goto out_0; |
| @@ -2455,8 +2455,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev) | |||
| 2455 | SET_NETDEV_DEV(dev, &pdev->dev); | 2455 | SET_NETDEV_DEV(dev, &pdev->dev); |
| 2456 | 2456 | ||
| 2457 | pdata = netdev_priv(dev); | 2457 | pdata = netdev_priv(dev); |
| 2458 | dev->irq = irq_res->start; | 2458 | dev->irq = irq; |
| 2459 | irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; | 2459 | irq_flags = irq_get_trigger_type(irq); |
| 2460 | pdata->ioaddr = ioremap_nocache(res->start, res_size); | 2460 | pdata->ioaddr = ioremap_nocache(res->start, res_size); |
| 2461 | 2461 | ||
| 2462 | pdata->dev = dev; | 2462 | pdata->dev = dev; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 2ac9552d1fa3..73bab983edd9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
| @@ -117,6 +117,12 @@ struct stmmac_priv { | |||
| 117 | int use_riwt; | 117 | int use_riwt; |
| 118 | int irq_wake; | 118 | int irq_wake; |
| 119 | spinlock_t ptp_lock; | 119 | spinlock_t ptp_lock; |
| 120 | |||
| 121 | #ifdef CONFIG_DEBUG_FS | ||
| 122 | struct dentry *dbgfs_dir; | ||
| 123 | struct dentry *dbgfs_rings_status; | ||
| 124 | struct dentry *dbgfs_dma_cap; | ||
| 125 | #endif | ||
| 120 | }; | 126 | }; |
| 121 | 127 | ||
| 122 | int stmmac_mdio_unregister(struct net_device *ndev); | 128 | int stmmac_mdio_unregister(struct net_device *ndev); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 05c146f718a3..2c5ce2baca87 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -118,7 +118,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id); | |||
| 118 | 118 | ||
| 119 | #ifdef CONFIG_DEBUG_FS | 119 | #ifdef CONFIG_DEBUG_FS |
| 120 | static int stmmac_init_fs(struct net_device *dev); | 120 | static int stmmac_init_fs(struct net_device *dev); |
| 121 | static void stmmac_exit_fs(void); | 121 | static void stmmac_exit_fs(struct net_device *dev); |
| 122 | #endif | 122 | #endif |
| 123 | 123 | ||
| 124 | #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) | 124 | #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) |
| @@ -1916,7 +1916,7 @@ static int stmmac_release(struct net_device *dev) | |||
| 1916 | netif_carrier_off(dev); | 1916 | netif_carrier_off(dev); |
| 1917 | 1917 | ||
| 1918 | #ifdef CONFIG_DEBUG_FS | 1918 | #ifdef CONFIG_DEBUG_FS |
| 1919 | stmmac_exit_fs(); | 1919 | stmmac_exit_fs(dev); |
| 1920 | #endif | 1920 | #endif |
| 1921 | 1921 | ||
| 1922 | stmmac_release_ptp(priv); | 1922 | stmmac_release_ptp(priv); |
| @@ -2508,8 +2508,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
| 2508 | 2508 | ||
| 2509 | #ifdef CONFIG_DEBUG_FS | 2509 | #ifdef CONFIG_DEBUG_FS |
| 2510 | static struct dentry *stmmac_fs_dir; | 2510 | static struct dentry *stmmac_fs_dir; |
| 2511 | static struct dentry *stmmac_rings_status; | ||
| 2512 | static struct dentry *stmmac_dma_cap; | ||
| 2513 | 2511 | ||
| 2514 | static void sysfs_display_ring(void *head, int size, int extend_desc, | 2512 | static void sysfs_display_ring(void *head, int size, int extend_desc, |
| 2515 | struct seq_file *seq) | 2513 | struct seq_file *seq) |
| @@ -2648,36 +2646,39 @@ static const struct file_operations stmmac_dma_cap_fops = { | |||
| 2648 | 2646 | ||
| 2649 | static int stmmac_init_fs(struct net_device *dev) | 2647 | static int stmmac_init_fs(struct net_device *dev) |
| 2650 | { | 2648 | { |
| 2651 | /* Create debugfs entries */ | 2649 | struct stmmac_priv *priv = netdev_priv(dev); |
| 2652 | stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); | 2650 | |
| 2651 | /* Create per netdev entries */ | ||
| 2652 | priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); | ||
| 2653 | 2653 | ||
| 2654 | if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { | 2654 | if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { |
| 2655 | pr_err("ERROR %s, debugfs create directory failed\n", | 2655 | pr_err("ERROR %s/%s, debugfs create directory failed\n", |
| 2656 | STMMAC_RESOURCE_NAME); | 2656 | STMMAC_RESOURCE_NAME, dev->name); |
| 2657 | 2657 | ||
| 2658 | return -ENOMEM; | 2658 | return -ENOMEM; |
| 2659 | } | 2659 | } |
| 2660 | 2660 | ||
| 2661 | /* Entry to report DMA RX/TX rings */ | 2661 | /* Entry to report DMA RX/TX rings */ |
| 2662 | stmmac_rings_status = debugfs_create_file("descriptors_status", | 2662 | priv->dbgfs_rings_status = |
| 2663 | S_IRUGO, stmmac_fs_dir, dev, | 2663 | debugfs_create_file("descriptors_status", S_IRUGO, |
| 2664 | &stmmac_rings_status_fops); | 2664 | priv->dbgfs_dir, dev, |
| 2665 | &stmmac_rings_status_fops); | ||
| 2665 | 2666 | ||
| 2666 | if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { | 2667 | if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { |
| 2667 | pr_info("ERROR creating stmmac ring debugfs file\n"); | 2668 | pr_info("ERROR creating stmmac ring debugfs file\n"); |
| 2668 | debugfs_remove(stmmac_fs_dir); | 2669 | debugfs_remove_recursive(priv->dbgfs_dir); |
| 2669 | 2670 | ||
| 2670 | return -ENOMEM; | 2671 | return -ENOMEM; |
| 2671 | } | 2672 | } |
| 2672 | 2673 | ||
| 2673 | /* Entry to report the DMA HW features */ | 2674 | /* Entry to report the DMA HW features */ |
| 2674 | stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir, | 2675 | priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, |
| 2675 | dev, &stmmac_dma_cap_fops); | 2676 | priv->dbgfs_dir, |
| 2677 | dev, &stmmac_dma_cap_fops); | ||
| 2676 | 2678 | ||
| 2677 | if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) { | 2679 | if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { |
| 2678 | pr_info("ERROR creating stmmac MMC debugfs file\n"); | 2680 | pr_info("ERROR creating stmmac MMC debugfs file\n"); |
| 2679 | debugfs_remove(stmmac_rings_status); | 2681 | debugfs_remove_recursive(priv->dbgfs_dir); |
| 2680 | debugfs_remove(stmmac_fs_dir); | ||
| 2681 | 2682 | ||
| 2682 | return -ENOMEM; | 2683 | return -ENOMEM; |
| 2683 | } | 2684 | } |
| @@ -2685,11 +2686,11 @@ static int stmmac_init_fs(struct net_device *dev) | |||
| 2685 | return 0; | 2686 | return 0; |
| 2686 | } | 2687 | } |
| 2687 | 2688 | ||
| 2688 | static void stmmac_exit_fs(void) | 2689 | static void stmmac_exit_fs(struct net_device *dev) |
| 2689 | { | 2690 | { |
| 2690 | debugfs_remove(stmmac_rings_status); | 2691 | struct stmmac_priv *priv = netdev_priv(dev); |
| 2691 | debugfs_remove(stmmac_dma_cap); | 2692 | |
| 2692 | debugfs_remove(stmmac_fs_dir); | 2693 | debugfs_remove_recursive(priv->dbgfs_dir); |
| 2693 | } | 2694 | } |
| 2694 | #endif /* CONFIG_DEBUG_FS */ | 2695 | #endif /* CONFIG_DEBUG_FS */ |
| 2695 | 2696 | ||
| @@ -3149,6 +3150,35 @@ err: | |||
| 3149 | __setup("stmmaceth=", stmmac_cmdline_opt); | 3150 | __setup("stmmaceth=", stmmac_cmdline_opt); |
| 3150 | #endif /* MODULE */ | 3151 | #endif /* MODULE */ |
| 3151 | 3152 | ||
| 3153 | static int __init stmmac_init(void) | ||
| 3154 | { | ||
| 3155 | #ifdef CONFIG_DEBUG_FS | ||
| 3156 | /* Create debugfs main directory if it doesn't exist yet */ | ||
| 3157 | if (!stmmac_fs_dir) { | ||
| 3158 | stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); | ||
| 3159 | |||
| 3160 | if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { | ||
| 3161 | pr_err("ERROR %s, debugfs create directory failed\n", | ||
| 3162 | STMMAC_RESOURCE_NAME); | ||
| 3163 | |||
| 3164 | return -ENOMEM; | ||
| 3165 | } | ||
| 3166 | } | ||
| 3167 | #endif | ||
| 3168 | |||
| 3169 | return 0; | ||
| 3170 | } | ||
| 3171 | |||
| 3172 | static void __exit stmmac_exit(void) | ||
| 3173 | { | ||
| 3174 | #ifdef CONFIG_DEBUG_FS | ||
| 3175 | debugfs_remove_recursive(stmmac_fs_dir); | ||
| 3176 | #endif | ||
| 3177 | } | ||
| 3178 | |||
| 3179 | module_init(stmmac_init) | ||
| 3180 | module_exit(stmmac_exit) | ||
| 3181 | |||
| 3152 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); | 3182 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); |
| 3153 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | 3183 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); |
| 3154 | MODULE_LICENSE("GPL"); | 3184 | MODULE_LICENSE("GPL"); |
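The stmmac changes above move the debugfs dentries from module-wide globals into stmmac_priv and create the top-level directory once at module init, so several instances can coexist and each tears down cleanly with debugfs_remove_recursive(). A condensed sketch of that layout for a hypothetical driver; names are placeholders and error handling is reduced to the essentials.

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/netdevice.h>

static struct dentry *example_fs_root;          /* shared by all instances */

struct example_priv {
        struct dentry *dbgfs_dir;               /* per-netdev subdirectory */
};

/* Per-device: create a subdirectory named after the netdev. */
static int example_init_fs(struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);

        priv->dbgfs_dir = debugfs_create_dir(dev->name, example_fs_root);
        if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir))
                return -ENOMEM;

        return 0;
}

static void example_exit_fs(struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);

        debugfs_remove_recursive(priv->dbgfs_dir); /* drops all files below */
}

/* Module scope: the root directory outlives individual devices. */
static int __init example_init(void)
{
        example_fs_root = debugfs_create_dir("example", NULL);
        return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
        debugfs_remove_recursive(example_fs_root);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");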
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 705bbdf93940..68aec5c460db 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | *******************************************************************************/ | 23 | *******************************************************************************/ |
| 24 | 24 | ||
| 25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 26 | #include <linux/module.h> | ||
| 26 | #include <linux/io.h> | 27 | #include <linux/io.h> |
| 27 | #include <linux/of.h> | 28 | #include <linux/of.h> |
| 28 | #include <linux/of_net.h> | 29 | #include <linux/of_net.h> |
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 2bef655279f3..9b7e0a34c98b 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c | |||
| @@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, | |||
| 1765 | ALE_PORT_STATE, | 1765 | ALE_PORT_STATE, |
| 1766 | ALE_PORT_STATE_FORWARD); | 1766 | ALE_PORT_STATE_FORWARD); |
| 1767 | 1767 | ||
| 1768 | if (ndev && slave->open) | 1768 | if (ndev && slave->open && |
| 1769 | slave->link_interface != SGMII_LINK_MAC_PHY && | ||
| 1770 | slave->link_interface != XGMII_LINK_MAC_PHY) | ||
| 1769 | netif_carrier_on(ndev); | 1771 | netif_carrier_on(ndev); |
| 1770 | } else { | 1772 | } else { |
| 1771 | writel(mac_control, GBE_REG_ADDR(slave, emac_regs, | 1773 | writel(mac_control, GBE_REG_ADDR(slave, emac_regs, |
| @@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, | |||
| 1773 | cpsw_ale_control_set(gbe_dev->ale, slave->port_num, | 1775 | cpsw_ale_control_set(gbe_dev->ale, slave->port_num, |
| 1774 | ALE_PORT_STATE, | 1776 | ALE_PORT_STATE, |
| 1775 | ALE_PORT_STATE_DISABLE); | 1777 | ALE_PORT_STATE_DISABLE); |
| 1776 | if (ndev) | 1778 | if (ndev && |
| 1779 | slave->link_interface != SGMII_LINK_MAC_PHY && | ||
| 1780 | slave->link_interface != XGMII_LINK_MAC_PHY) | ||
| 1777 | netif_carrier_off(ndev); | 1781 | netif_carrier_off(ndev); |
| 1778 | } | 1782 | } |
| 1779 | 1783 | ||
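The netcp_ethss hunks above stop forcing the carrier state for slaves whose link_interface is a MAC-to-PHY type, since phylib already drives netif_carrier_on/off from the PHY state machine. A generic sketch of that split for a hypothetical multi-port driver; the structure and names are illustrative only.

#include <linux/netdevice.h>
#include <linux/phy.h>

struct example_port {
        struct net_device *ndev;
        struct phy_device *phydev;  /* non-NULL when phylib manages the link */
        bool link_up;               /* link state reported by the MAC/SerDes */
};

/* Sketch: only touch the carrier directly when no PHY owns the link;
 * otherwise the PHY state machine calls netif_carrier_*() itself. */
static void example_update_carrier(struct example_port *port)
{
        if (port->phydev)
                return;

        if (port->link_up)
                netif_carrier_on(port->ndev);
        else
                netif_carrier_off(port->ndev);
}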
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 690a4c36b316..af2694dc6f90 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c | |||
| @@ -707,8 +707,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 707 | 707 | ||
| 708 | cur_p->app0 |= STS_CTRL_APP0_SOP; | 708 | cur_p->app0 |= STS_CTRL_APP0_SOP; |
| 709 | cur_p->len = skb_headlen(skb); | 709 | cur_p->len = skb_headlen(skb); |
| 710 | cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, | 710 | cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, |
| 711 | DMA_TO_DEVICE); | 711 | skb_headlen(skb), DMA_TO_DEVICE); |
| 712 | cur_p->app4 = (unsigned long)skb; | 712 | cur_p->app4 = (unsigned long)skb; |
| 713 | 713 | ||
| 714 | for (ii = 0; ii < num_frag; ii++) { | 714 | for (ii = 0; ii < num_frag; ii++) { |
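The ll_temac fix above maps only the linear head of the skb (skb_headlen()); the paged fragments are mapped one by one in the loop that follows, so mapping skb->len for the head would over-run the linear buffer. A minimal sketch of that head/fragment split, using a hypothetical descriptor layout.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_desc {
        dma_addr_t phys;
        u32 len;
};

/* Sketch: map the skb head and each fragment with its own length. */
static int example_map_skb(struct device *dev, struct sk_buff *skb,
                           struct example_desc *desc)
{
        int i, nfrags = skb_shinfo(skb)->nr_frags;

        desc[0].len = skb_headlen(skb);
        desc[0].phys = dma_map_single(dev, skb->data, skb_headlen(skb),
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(dev, desc[0].phys))
                return -ENOMEM;

        for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                desc[i + 1].len = skb_frag_size(frag);
                desc[i + 1].phys = skb_frag_dma_map(dev, frag, 0,
                                                    skb_frag_size(frag),
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(dev, desc[i + 1].phys))
                        return -ENOMEM; /* caller unmaps what was mapped */
        }

        return 0;
}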
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a10b31664709..41071d32bc8e 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
| @@ -128,7 +128,6 @@ struct ndis_tcp_ip_checksum_info; | |||
| 128 | struct hv_netvsc_packet { | 128 | struct hv_netvsc_packet { |
| 129 | /* Bookkeeping stuff */ | 129 | /* Bookkeeping stuff */ |
| 130 | u32 status; | 130 | u32 status; |
| 131 | bool part_of_skb; | ||
| 132 | 131 | ||
| 133 | bool is_data_pkt; | 132 | bool is_data_pkt; |
| 134 | bool xmit_more; /* from skb */ | 133 | bool xmit_more; /* from skb */ |
| @@ -612,6 +611,15 @@ struct multi_send_data { | |||
| 612 | u32 count; /* counter of batched packets */ | 611 | u32 count; /* counter of batched packets */ |
| 613 | }; | 612 | }; |
| 614 | 613 | ||
| 614 | /* The context of the netvsc device */ | ||
| 615 | struct net_device_context { | ||
| 616 | /* point back to our device context */ | ||
| 617 | struct hv_device *device_ctx; | ||
| 618 | struct delayed_work dwork; | ||
| 619 | struct work_struct work; | ||
| 620 | u32 msg_enable; /* debug level */ | ||
| 621 | }; | ||
| 622 | |||
| 615 | /* Per netvsc device */ | 623 | /* Per netvsc device */ |
| 616 | struct netvsc_device { | 624 | struct netvsc_device { |
| 617 | struct hv_device *dev; | 625 | struct hv_device *dev; |
| @@ -667,6 +675,9 @@ struct netvsc_device { | |||
| 667 | struct multi_send_data msd[NR_CPUS]; | 675 | struct multi_send_data msd[NR_CPUS]; |
| 668 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ | 676 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ |
| 669 | u32 pkt_align; /* alignment bytes, e.g. 8 */ | 677 | u32 pkt_align; /* alignment bytes, e.g. 8 */ |
| 678 | |||
| 679 | /* The net device context */ | ||
| 680 | struct net_device_context *nd_ctx; | ||
| 670 | }; | 681 | }; |
| 671 | 682 | ||
| 672 | /* NdisInitialize message */ | 683 | /* NdisInitialize message */ |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 2e8ad0636b46..ea091bc5ff09 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
| @@ -826,7 +826,6 @@ int netvsc_send(struct hv_device *device, | |||
| 826 | u16 q_idx = packet->q_idx; | 826 | u16 q_idx = packet->q_idx; |
| 827 | u32 pktlen = packet->total_data_buflen, msd_len = 0; | 827 | u32 pktlen = packet->total_data_buflen, msd_len = 0; |
| 828 | unsigned int section_index = NETVSC_INVALID_INDEX; | 828 | unsigned int section_index = NETVSC_INVALID_INDEX; |
| 829 | struct sk_buff *skb = NULL; | ||
| 830 | unsigned long flag; | 829 | unsigned long flag; |
| 831 | struct multi_send_data *msdp; | 830 | struct multi_send_data *msdp; |
| 832 | struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; | 831 | struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; |
| @@ -889,11 +888,6 @@ int netvsc_send(struct hv_device *device, | |||
| 889 | } else { | 888 | } else { |
| 890 | packet->page_buf_cnt = 0; | 889 | packet->page_buf_cnt = 0; |
| 891 | packet->total_data_buflen += msd_len; | 890 | packet->total_data_buflen += msd_len; |
| 892 | if (!packet->part_of_skb) { | ||
| 893 | skb = (struct sk_buff *)(unsigned long)packet-> | ||
| 894 | send_completion_tid; | ||
| 895 | packet->send_completion_tid = 0; | ||
| 896 | } | ||
| 897 | } | 891 | } |
| 898 | 892 | ||
| 899 | if (msdp->pkt) | 893 | if (msdp->pkt) |
| @@ -929,12 +923,8 @@ int netvsc_send(struct hv_device *device, | |||
| 929 | if (cur_send) | 923 | if (cur_send) |
| 930 | ret = netvsc_send_pkt(cur_send, net_device); | 924 | ret = netvsc_send_pkt(cur_send, net_device); |
| 931 | 925 | ||
| 932 | if (ret != 0) { | 926 | if (ret != 0 && section_index != NETVSC_INVALID_INDEX) |
| 933 | if (section_index != NETVSC_INVALID_INDEX) | 927 | netvsc_free_send_slot(net_device, section_index); |
| 934 | netvsc_free_send_slot(net_device, section_index); | ||
| 935 | } else if (skb) { | ||
| 936 | dev_kfree_skb_any(skb); | ||
| 937 | } | ||
| 938 | 928 | ||
| 939 | return ret; | 929 | return ret; |
| 940 | } | 930 | } |
| @@ -1197,6 +1187,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) | |||
| 1197 | */ | 1187 | */ |
| 1198 | ndev = net_device->ndev; | 1188 | ndev = net_device->ndev; |
| 1199 | 1189 | ||
| 1190 | /* Add netvsc_device context to netvsc_device */ | ||
| 1191 | net_device->nd_ctx = netdev_priv(ndev); | ||
| 1192 | |||
| 1200 | /* Initialize the NetVSC channel extension */ | 1193 | /* Initialize the NetVSC channel extension */ |
| 1201 | init_completion(&net_device->channel_init_wait); | 1194 | init_completion(&net_device->channel_init_wait); |
| 1202 | 1195 | ||
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a3a9d3898a6e..5993c7e2d723 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -40,18 +40,21 @@ | |||
| 40 | 40 | ||
| 41 | #include "hyperv_net.h" | 41 | #include "hyperv_net.h" |
| 42 | 42 | ||
| 43 | struct net_device_context { | ||
| 44 | /* point back to our device context */ | ||
| 45 | struct hv_device *device_ctx; | ||
| 46 | struct delayed_work dwork; | ||
| 47 | struct work_struct work; | ||
| 48 | }; | ||
| 49 | 43 | ||
| 50 | #define RING_SIZE_MIN 64 | 44 | #define RING_SIZE_MIN 64 |
| 51 | static int ring_size = 128; | 45 | static int ring_size = 128; |
| 52 | module_param(ring_size, int, S_IRUGO); | 46 | module_param(ring_size, int, S_IRUGO); |
| 53 | MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); | 47 | MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); |
| 54 | 48 | ||
| 49 | static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | | ||
| 50 | NETIF_MSG_LINK | NETIF_MSG_IFUP | | ||
| 51 | NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | | ||
| 52 | NETIF_MSG_TX_ERR; | ||
| 53 | |||
| 54 | static int debug = -1; | ||
| 55 | module_param(debug, int, S_IRUGO); | ||
| 56 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | ||
| 57 | |||
| 55 | static void do_set_multicast(struct work_struct *w) | 58 | static void do_set_multicast(struct work_struct *w) |
| 56 | { | 59 | { |
| 57 | struct net_device_context *ndevctx = | 60 | struct net_device_context *ndevctx = |
| @@ -235,9 +238,6 @@ void netvsc_xmit_completion(void *context) | |||
| 235 | struct sk_buff *skb = (struct sk_buff *) | 238 | struct sk_buff *skb = (struct sk_buff *) |
| 236 | (unsigned long)packet->send_completion_tid; | 239 | (unsigned long)packet->send_completion_tid; |
| 237 | 240 | ||
| 238 | if (!packet->part_of_skb) | ||
| 239 | kfree(packet); | ||
| 240 | |||
| 241 | if (skb) | 241 | if (skb) |
| 242 | dev_kfree_skb_any(skb); | 242 | dev_kfree_skb_any(skb); |
| 243 | } | 243 | } |
| @@ -389,7 +389,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
| 389 | u32 net_trans_info; | 389 | u32 net_trans_info; |
| 390 | u32 hash; | 390 | u32 hash; |
| 391 | u32 skb_length; | 391 | u32 skb_length; |
| 392 | u32 head_room; | ||
| 393 | u32 pkt_sz; | 392 | u32 pkt_sz; |
| 394 | struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; | 393 | struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; |
| 395 | 394 | ||
| @@ -402,7 +401,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
| 402 | 401 | ||
| 403 | check_size: | 402 | check_size: |
| 404 | skb_length = skb->len; | 403 | skb_length = skb->len; |
| 405 | head_room = skb_headroom(skb); | ||
| 406 | num_data_pgs = netvsc_get_slots(skb) + 2; | 404 | num_data_pgs = netvsc_get_slots(skb) + 2; |
| 407 | if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) { | 405 | if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) { |
| 408 | net_alert_ratelimited("packet too big: %u pages (%u bytes)\n", | 406 | net_alert_ratelimited("packet too big: %u pages (%u bytes)\n", |
| @@ -421,20 +419,14 @@ check_size: | |||
| 421 | 419 | ||
| 422 | pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE; | 420 | pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE; |
| 423 | 421 | ||
| 424 | if (head_room < pkt_sz) { | 422 | ret = skb_cow_head(skb, pkt_sz); |
| 425 | packet = kmalloc(pkt_sz, GFP_ATOMIC); | 423 | if (ret) { |
| 426 | if (!packet) { | 424 | netdev_err(net, "unable to alloc hv_netvsc_packet\n"); |
| 427 | /* out of memory, drop packet */ | 425 | ret = -ENOMEM; |
| 428 | netdev_err(net, "unable to alloc hv_netvsc_packet\n"); | 426 | goto drop; |
| 429 | ret = -ENOMEM; | ||
| 430 | goto drop; | ||
| 431 | } | ||
| 432 | packet->part_of_skb = false; | ||
| 433 | } else { | ||
| 434 | /* Use the headroom for building up the packet */ | ||
| 435 | packet = (struct hv_netvsc_packet *)skb->head; | ||
| 436 | packet->part_of_skb = true; | ||
| 437 | } | 427 | } |
| 428 | /* Use the headroom for building up the packet */ | ||
| 429 | packet = (struct hv_netvsc_packet *)skb->head; | ||
| 438 | 430 | ||
| 439 | packet->status = 0; | 431 | packet->status = 0; |
| 440 | packet->xmit_more = skb->xmit_more; | 432 | packet->xmit_more = skb->xmit_more; |
| @@ -591,8 +583,6 @@ drop: | |||
| 591 | net->stats.tx_bytes += skb_length; | 583 | net->stats.tx_bytes += skb_length; |
| 592 | net->stats.tx_packets++; | 584 | net->stats.tx_packets++; |
| 593 | } else { | 585 | } else { |
| 594 | if (packet && !packet->part_of_skb) | ||
| 595 | kfree(packet); | ||
| 596 | if (ret != -EAGAIN) { | 586 | if (ret != -EAGAIN) { |
| 597 | dev_kfree_skb_any(skb); | 587 | dev_kfree_skb_any(skb); |
| 598 | net->stats.tx_dropped++; | 588 | net->stats.tx_dropped++; |
| @@ -888,6 +878,11 @@ static int netvsc_probe(struct hv_device *dev, | |||
| 888 | 878 | ||
| 889 | net_device_ctx = netdev_priv(net); | 879 | net_device_ctx = netdev_priv(net); |
| 890 | net_device_ctx->device_ctx = dev; | 880 | net_device_ctx->device_ctx = dev; |
| 881 | net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); | ||
| 882 | if (netif_msg_probe(net_device_ctx)) | ||
| 883 | netdev_dbg(net, "netvsc msg_enable: %d\n", | ||
| 884 | net_device_ctx->msg_enable); | ||
| 885 | |||
| 891 | hv_set_drvdata(dev, net); | 886 | hv_set_drvdata(dev, net); |
| 892 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); | 887 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); |
| 893 | INIT_WORK(&net_device_ctx->work, do_set_multicast); | 888 | INIT_WORK(&net_device_ctx->work, do_set_multicast); |
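The netvsc_drv changes above drop the kmalloc() fallback and the part_of_skb bookkeeping: skb_cow_head() now guarantees enough writable headroom, and the packet metadata is always built in skb->head. A reduced sketch of that pattern; the metadata struct and names are placeholders.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_tx_meta {
        u32 len;
        u16 queue;
};

/* Sketch: reserve writable headroom, then build driver metadata in it. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *ndev)
{
        struct example_tx_meta *meta;

        if (skb_cow_head(skb, sizeof(*meta))) {
                /* could not expand/copy the head: drop the frame */
                dev_kfree_skb_any(skb);
                ndev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        meta = (struct example_tx_meta *)skb->head;
        meta->len = skb->len;
        meta->queue = skb_get_queue_mapping(skb);

        /* ... hand the skb and its metadata to the hardware/host here ... */
        return NETDEV_TX_OK;
}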
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 0d92efefd796..9118cea91882 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
| @@ -429,7 +429,8 @@ int rndis_filter_receive(struct hv_device *dev, | |||
| 429 | 429 | ||
| 430 | rndis_msg = pkt->data; | 430 | rndis_msg = pkt->data; |
| 431 | 431 | ||
| 432 | dump_rndis_message(dev, rndis_msg); | 432 | if (netif_msg_rx_err(net_dev->nd_ctx)) |
| 433 | dump_rndis_message(dev, rndis_msg); | ||
| 433 | 434 | ||
| 434 | switch (rndis_msg->ndis_msg_type) { | 435 | switch (rndis_msg->ndis_msg_type) { |
| 435 | case RNDIS_MSG_PACKET: | 436 | case RNDIS_MSG_PACKET: |
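The rndis_filter hunk gates the RNDIS message dump behind netif_msg_rx_err(), using the msg_enable value that netvsc_drv.c above now derives from a module "debug" parameter via netif_msg_init(). A generic sketch of that logging idiom; the private struct and names are placeholders.

#include <linux/module.h>
#include <linux/netdevice.h>

static int debug = -1;          /* -1 lets netif_msg_init() pick the defaults */
module_param(debug, int, 0444);

struct example_priv {
        struct net_device *ndev;
        u32 msg_enable;
};

/* Sketch: derive msg_enable once at probe time ... */
static void example_init_msg_level(struct example_priv *priv)
{
        priv->msg_enable = netif_msg_init(debug,
                                          NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                          NETIF_MSG_LINK | NETIF_MSG_RX_ERR);
}

/* ... then gate verbose paths on the relevant message class. */
static void example_log_rx_error(struct example_priv *priv, int status)
{
        if (netif_msg_rx_err(priv))
                netdev_dbg(priv->ndev, "rx error, status %d\n", status);
}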
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 38026650c038..67d00fbc2e0e 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c | |||
| @@ -85,6 +85,7 @@ struct at86rf230_local { | |||
| 85 | struct ieee802154_hw *hw; | 85 | struct ieee802154_hw *hw; |
| 86 | struct at86rf2xx_chip_data *data; | 86 | struct at86rf2xx_chip_data *data; |
| 87 | struct regmap *regmap; | 87 | struct regmap *regmap; |
| 88 | int slp_tr; | ||
| 88 | 89 | ||
| 89 | struct completion state_complete; | 90 | struct completion state_complete; |
| 90 | struct at86rf230_state_change state; | 91 | struct at86rf230_state_change state; |
| @@ -95,163 +96,164 @@ struct at86rf230_local { | |||
| 95 | unsigned long cal_timeout; | 96 | unsigned long cal_timeout; |
| 96 | s8 max_frame_retries; | 97 | s8 max_frame_retries; |
| 97 | bool is_tx; | 98 | bool is_tx; |
| 99 | bool is_tx_from_off; | ||
| 98 | u8 tx_retry; | 100 | u8 tx_retry; |
| 99 | struct sk_buff *tx_skb; | 101 | struct sk_buff *tx_skb; |
| 100 | struct at86rf230_state_change tx; | 102 | struct at86rf230_state_change tx; |
| 101 | }; | 103 | }; |
| 102 | 104 | ||
| 103 | #define RG_TRX_STATUS (0x01) | 105 | #define RG_TRX_STATUS (0x01) |
| 104 | #define SR_TRX_STATUS 0x01, 0x1f, 0 | 106 | #define SR_TRX_STATUS 0x01, 0x1f, 0 |
| 105 | #define SR_RESERVED_01_3 0x01, 0x20, 5 | 107 | #define SR_RESERVED_01_3 0x01, 0x20, 5 |
| 106 | #define SR_CCA_STATUS 0x01, 0x40, 6 | 108 | #define SR_CCA_STATUS 0x01, 0x40, 6 |
| 107 | #define SR_CCA_DONE 0x01, 0x80, 7 | 109 | #define SR_CCA_DONE 0x01, 0x80, 7 |
| 108 | #define RG_TRX_STATE (0x02) | 110 | #define RG_TRX_STATE (0x02) |
| 109 | #define SR_TRX_CMD 0x02, 0x1f, 0 | 111 | #define SR_TRX_CMD 0x02, 0x1f, 0 |
| 110 | #define SR_TRAC_STATUS 0x02, 0xe0, 5 | 112 | #define SR_TRAC_STATUS 0x02, 0xe0, 5 |
| 111 | #define RG_TRX_CTRL_0 (0x03) | 113 | #define RG_TRX_CTRL_0 (0x03) |
| 112 | #define SR_CLKM_CTRL 0x03, 0x07, 0 | 114 | #define SR_CLKM_CTRL 0x03, 0x07, 0 |
| 113 | #define SR_CLKM_SHA_SEL 0x03, 0x08, 3 | 115 | #define SR_CLKM_SHA_SEL 0x03, 0x08, 3 |
| 114 | #define SR_PAD_IO_CLKM 0x03, 0x30, 4 | 116 | #define SR_PAD_IO_CLKM 0x03, 0x30, 4 |
| 115 | #define SR_PAD_IO 0x03, 0xc0, 6 | 117 | #define SR_PAD_IO 0x03, 0xc0, 6 |
| 116 | #define RG_TRX_CTRL_1 (0x04) | 118 | #define RG_TRX_CTRL_1 (0x04) |
| 117 | #define SR_IRQ_POLARITY 0x04, 0x01, 0 | 119 | #define SR_IRQ_POLARITY 0x04, 0x01, 0 |
| 118 | #define SR_IRQ_MASK_MODE 0x04, 0x02, 1 | 120 | #define SR_IRQ_MASK_MODE 0x04, 0x02, 1 |
| 119 | #define SR_SPI_CMD_MODE 0x04, 0x0c, 2 | 121 | #define SR_SPI_CMD_MODE 0x04, 0x0c, 2 |
| 120 | #define SR_RX_BL_CTRL 0x04, 0x10, 4 | 122 | #define SR_RX_BL_CTRL 0x04, 0x10, 4 |
| 121 | #define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 | 123 | #define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 |
| 122 | #define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 | 124 | #define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 |
| 123 | #define SR_PA_EXT_EN 0x04, 0x80, 7 | 125 | #define SR_PA_EXT_EN 0x04, 0x80, 7 |
| 124 | #define RG_PHY_TX_PWR (0x05) | 126 | #define RG_PHY_TX_PWR (0x05) |
| 125 | #define SR_TX_PWR 0x05, 0x0f, 0 | 127 | #define SR_TX_PWR 0x05, 0x0f, 0 |
| 126 | #define SR_PA_LT 0x05, 0x30, 4 | 128 | #define SR_PA_LT 0x05, 0x30, 4 |
| 127 | #define SR_PA_BUF_LT 0x05, 0xc0, 6 | 129 | #define SR_PA_BUF_LT 0x05, 0xc0, 6 |
| 128 | #define RG_PHY_RSSI (0x06) | 130 | #define RG_PHY_RSSI (0x06) |
| 129 | #define SR_RSSI 0x06, 0x1f, 0 | 131 | #define SR_RSSI 0x06, 0x1f, 0 |
| 130 | #define SR_RND_VALUE 0x06, 0x60, 5 | 132 | #define SR_RND_VALUE 0x06, 0x60, 5 |
| 131 | #define SR_RX_CRC_VALID 0x06, 0x80, 7 | 133 | #define SR_RX_CRC_VALID 0x06, 0x80, 7 |
| 132 | #define RG_PHY_ED_LEVEL (0x07) | 134 | #define RG_PHY_ED_LEVEL (0x07) |
| 133 | #define SR_ED_LEVEL 0x07, 0xff, 0 | 135 | #define SR_ED_LEVEL 0x07, 0xff, 0 |
| 134 | #define RG_PHY_CC_CCA (0x08) | 136 | #define RG_PHY_CC_CCA (0x08) |
| 135 | #define SR_CHANNEL 0x08, 0x1f, 0 | 137 | #define SR_CHANNEL 0x08, 0x1f, 0 |
| 136 | #define SR_CCA_MODE 0x08, 0x60, 5 | 138 | #define SR_CCA_MODE 0x08, 0x60, 5 |
| 137 | #define SR_CCA_REQUEST 0x08, 0x80, 7 | 139 | #define SR_CCA_REQUEST 0x08, 0x80, 7 |
| 138 | #define RG_CCA_THRES (0x09) | 140 | #define RG_CCA_THRES (0x09) |
| 139 | #define SR_CCA_ED_THRES 0x09, 0x0f, 0 | 141 | #define SR_CCA_ED_THRES 0x09, 0x0f, 0 |
| 140 | #define SR_RESERVED_09_1 0x09, 0xf0, 4 | 142 | #define SR_RESERVED_09_1 0x09, 0xf0, 4 |
| 141 | #define RG_RX_CTRL (0x0a) | 143 | #define RG_RX_CTRL (0x0a) |
| 142 | #define SR_PDT_THRES 0x0a, 0x0f, 0 | 144 | #define SR_PDT_THRES 0x0a, 0x0f, 0 |
| 143 | #define SR_RESERVED_0a_1 0x0a, 0xf0, 4 | 145 | #define SR_RESERVED_0a_1 0x0a, 0xf0, 4 |
| 144 | #define RG_SFD_VALUE (0x0b) | 146 | #define RG_SFD_VALUE (0x0b) |
| 145 | #define SR_SFD_VALUE 0x0b, 0xff, 0 | 147 | #define SR_SFD_VALUE 0x0b, 0xff, 0 |
| 146 | #define RG_TRX_CTRL_2 (0x0c) | 148 | #define RG_TRX_CTRL_2 (0x0c) |
| 147 | #define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 | 149 | #define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 |
| 148 | #define SR_SUB_MODE 0x0c, 0x04, 2 | 150 | #define SR_SUB_MODE 0x0c, 0x04, 2 |
| 149 | #define SR_BPSK_QPSK 0x0c, 0x08, 3 | 151 | #define SR_BPSK_QPSK 0x0c, 0x08, 3 |
| 150 | #define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4 | 152 | #define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4 |
| 151 | #define SR_RESERVED_0c_5 0x0c, 0x60, 5 | 153 | #define SR_RESERVED_0c_5 0x0c, 0x60, 5 |
| 152 | #define SR_RX_SAFE_MODE 0x0c, 0x80, 7 | 154 | #define SR_RX_SAFE_MODE 0x0c, 0x80, 7 |
| 153 | #define RG_ANT_DIV (0x0d) | 155 | #define RG_ANT_DIV (0x0d) |
| 154 | #define SR_ANT_CTRL 0x0d, 0x03, 0 | 156 | #define SR_ANT_CTRL 0x0d, 0x03, 0 |
| 155 | #define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 | 157 | #define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 |
| 156 | #define SR_ANT_DIV_EN 0x0d, 0x08, 3 | 158 | #define SR_ANT_DIV_EN 0x0d, 0x08, 3 |
| 157 | #define SR_RESERVED_0d_2 0x0d, 0x70, 4 | 159 | #define SR_RESERVED_0d_2 0x0d, 0x70, 4 |
| 158 | #define SR_ANT_SEL 0x0d, 0x80, 7 | 160 | #define SR_ANT_SEL 0x0d, 0x80, 7 |
| 159 | #define RG_IRQ_MASK (0x0e) | 161 | #define RG_IRQ_MASK (0x0e) |
| 160 | #define SR_IRQ_MASK 0x0e, 0xff, 0 | 162 | #define SR_IRQ_MASK 0x0e, 0xff, 0 |
| 161 | #define RG_IRQ_STATUS (0x0f) | 163 | #define RG_IRQ_STATUS (0x0f) |
| 162 | #define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 | 164 | #define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 |
| 163 | #define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 | 165 | #define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 |
| 164 | #define SR_IRQ_2_RX_START 0x0f, 0x04, 2 | 166 | #define SR_IRQ_2_RX_START 0x0f, 0x04, 2 |
| 165 | #define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 | 167 | #define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 |
| 166 | #define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 | 168 | #define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 |
| 167 | #define SR_IRQ_5_AMI 0x0f, 0x20, 5 | 169 | #define SR_IRQ_5_AMI 0x0f, 0x20, 5 |
| 168 | #define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 | 170 | #define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 |
| 169 | #define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 | 171 | #define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 |
| 170 | #define RG_VREG_CTRL (0x10) | 172 | #define RG_VREG_CTRL (0x10) |
| 171 | #define SR_RESERVED_10_6 0x10, 0x03, 0 | 173 | #define SR_RESERVED_10_6 0x10, 0x03, 0 |
| 172 | #define SR_DVDD_OK 0x10, 0x04, 2 | 174 | #define SR_DVDD_OK 0x10, 0x04, 2 |
| 173 | #define SR_DVREG_EXT 0x10, 0x08, 3 | 175 | #define SR_DVREG_EXT 0x10, 0x08, 3 |
| 174 | #define SR_RESERVED_10_3 0x10, 0x30, 4 | 176 | #define SR_RESERVED_10_3 0x10, 0x30, 4 |
| 175 | #define SR_AVDD_OK 0x10, 0x40, 6 | 177 | #define SR_AVDD_OK 0x10, 0x40, 6 |
| 176 | #define SR_AVREG_EXT 0x10, 0x80, 7 | 178 | #define SR_AVREG_EXT 0x10, 0x80, 7 |
| 177 | #define RG_BATMON (0x11) | 179 | #define RG_BATMON (0x11) |
| 178 | #define SR_BATMON_VTH 0x11, 0x0f, 0 | 180 | #define SR_BATMON_VTH 0x11, 0x0f, 0 |
| 179 | #define SR_BATMON_HR 0x11, 0x10, 4 | 181 | #define SR_BATMON_HR 0x11, 0x10, 4 |
| 180 | #define SR_BATMON_OK 0x11, 0x20, 5 | 182 | #define SR_BATMON_OK 0x11, 0x20, 5 |
| 181 | #define SR_RESERVED_11_1 0x11, 0xc0, 6 | 183 | #define SR_RESERVED_11_1 0x11, 0xc0, 6 |
| 182 | #define RG_XOSC_CTRL (0x12) | 184 | #define RG_XOSC_CTRL (0x12) |
| 183 | #define SR_XTAL_TRIM 0x12, 0x0f, 0 | 185 | #define SR_XTAL_TRIM 0x12, 0x0f, 0 |
| 184 | #define SR_XTAL_MODE 0x12, 0xf0, 4 | 186 | #define SR_XTAL_MODE 0x12, 0xf0, 4 |
| 185 | #define RG_RX_SYN (0x15) | 187 | #define RG_RX_SYN (0x15) |
| 186 | #define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 | 188 | #define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 |
| 187 | #define SR_RESERVED_15_2 0x15, 0x70, 4 | 189 | #define SR_RESERVED_15_2 0x15, 0x70, 4 |
| 188 | #define SR_RX_PDT_DIS 0x15, 0x80, 7 | 190 | #define SR_RX_PDT_DIS 0x15, 0x80, 7 |
| 189 | #define RG_XAH_CTRL_1 (0x17) | 191 | #define RG_XAH_CTRL_1 (0x17) |
| 190 | #define SR_RESERVED_17_8 0x17, 0x01, 0 | 192 | #define SR_RESERVED_17_8 0x17, 0x01, 0 |
| 191 | #define SR_AACK_PROM_MODE 0x17, 0x02, 1 | 193 | #define SR_AACK_PROM_MODE 0x17, 0x02, 1 |
| 192 | #define SR_AACK_ACK_TIME 0x17, 0x04, 2 | 194 | #define SR_AACK_ACK_TIME 0x17, 0x04, 2 |
| 193 | #define SR_RESERVED_17_5 0x17, 0x08, 3 | 195 | #define SR_RESERVED_17_5 0x17, 0x08, 3 |
| 194 | #define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 | 196 | #define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 |
| 195 | #define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 | 197 | #define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 |
| 196 | #define SR_CSMA_LBT_MODE 0x17, 0x40, 6 | 198 | #define SR_CSMA_LBT_MODE 0x17, 0x40, 6 |
| 197 | #define SR_RESERVED_17_1 0x17, 0x80, 7 | 199 | #define SR_RESERVED_17_1 0x17, 0x80, 7 |
| 198 | #define RG_FTN_CTRL (0x18) | 200 | #define RG_FTN_CTRL (0x18) |
| 199 | #define SR_RESERVED_18_2 0x18, 0x7f, 0 | 201 | #define SR_RESERVED_18_2 0x18, 0x7f, 0 |
| 200 | #define SR_FTN_START 0x18, 0x80, 7 | 202 | #define SR_FTN_START 0x18, 0x80, 7 |
| 201 | #define RG_PLL_CF (0x1a) | 203 | #define RG_PLL_CF (0x1a) |
| 202 | #define SR_RESERVED_1a_2 0x1a, 0x7f, 0 | 204 | #define SR_RESERVED_1a_2 0x1a, 0x7f, 0 |
| 203 | #define SR_PLL_CF_START 0x1a, 0x80, 7 | 205 | #define SR_PLL_CF_START 0x1a, 0x80, 7 |
| 204 | #define RG_PLL_DCU (0x1b) | 206 | #define RG_PLL_DCU (0x1b) |
| 205 | #define SR_RESERVED_1b_3 0x1b, 0x3f, 0 | 207 | #define SR_RESERVED_1b_3 0x1b, 0x3f, 0 |
| 206 | #define SR_RESERVED_1b_2 0x1b, 0x40, 6 | 208 | #define SR_RESERVED_1b_2 0x1b, 0x40, 6 |
| 207 | #define SR_PLL_DCU_START 0x1b, 0x80, 7 | 209 | #define SR_PLL_DCU_START 0x1b, 0x80, 7 |
| 208 | #define RG_PART_NUM (0x1c) | 210 | #define RG_PART_NUM (0x1c) |
| 209 | #define SR_PART_NUM 0x1c, 0xff, 0 | 211 | #define SR_PART_NUM 0x1c, 0xff, 0 |
| 210 | #define RG_VERSION_NUM (0x1d) | 212 | #define RG_VERSION_NUM (0x1d) |
| 211 | #define SR_VERSION_NUM 0x1d, 0xff, 0 | 213 | #define SR_VERSION_NUM 0x1d, 0xff, 0 |
| 212 | #define RG_MAN_ID_0 (0x1e) | 214 | #define RG_MAN_ID_0 (0x1e) |
| 213 | #define SR_MAN_ID_0 0x1e, 0xff, 0 | 215 | #define SR_MAN_ID_0 0x1e, 0xff, 0 |
| 214 | #define RG_MAN_ID_1 (0x1f) | 216 | #define RG_MAN_ID_1 (0x1f) |
| 215 | #define SR_MAN_ID_1 0x1f, 0xff, 0 | 217 | #define SR_MAN_ID_1 0x1f, 0xff, 0 |
| 216 | #define RG_SHORT_ADDR_0 (0x20) | 218 | #define RG_SHORT_ADDR_0 (0x20) |
| 217 | #define SR_SHORT_ADDR_0 0x20, 0xff, 0 | 219 | #define SR_SHORT_ADDR_0 0x20, 0xff, 0 |
| 218 | #define RG_SHORT_ADDR_1 (0x21) | 220 | #define RG_SHORT_ADDR_1 (0x21) |
| 219 | #define SR_SHORT_ADDR_1 0x21, 0xff, 0 | 221 | #define SR_SHORT_ADDR_1 0x21, 0xff, 0 |
| 220 | #define RG_PAN_ID_0 (0x22) | 222 | #define RG_PAN_ID_0 (0x22) |
| 221 | #define SR_PAN_ID_0 0x22, 0xff, 0 | 223 | #define SR_PAN_ID_0 0x22, 0xff, 0 |
| 222 | #define RG_PAN_ID_1 (0x23) | 224 | #define RG_PAN_ID_1 (0x23) |
| 223 | #define SR_PAN_ID_1 0x23, 0xff, 0 | 225 | #define SR_PAN_ID_1 0x23, 0xff, 0 |
| 224 | #define RG_IEEE_ADDR_0 (0x24) | 226 | #define RG_IEEE_ADDR_0 (0x24) |
| 225 | #define SR_IEEE_ADDR_0 0x24, 0xff, 0 | 227 | #define SR_IEEE_ADDR_0 0x24, 0xff, 0 |
| 226 | #define RG_IEEE_ADDR_1 (0x25) | 228 | #define RG_IEEE_ADDR_1 (0x25) |
| 227 | #define SR_IEEE_ADDR_1 0x25, 0xff, 0 | 229 | #define SR_IEEE_ADDR_1 0x25, 0xff, 0 |
| 228 | #define RG_IEEE_ADDR_2 (0x26) | 230 | #define RG_IEEE_ADDR_2 (0x26) |
| 229 | #define SR_IEEE_ADDR_2 0x26, 0xff, 0 | 231 | #define SR_IEEE_ADDR_2 0x26, 0xff, 0 |
| 230 | #define RG_IEEE_ADDR_3 (0x27) | 232 | #define RG_IEEE_ADDR_3 (0x27) |
| 231 | #define SR_IEEE_ADDR_3 0x27, 0xff, 0 | 233 | #define SR_IEEE_ADDR_3 0x27, 0xff, 0 |
| 232 | #define RG_IEEE_ADDR_4 (0x28) | 234 | #define RG_IEEE_ADDR_4 (0x28) |
| 233 | #define SR_IEEE_ADDR_4 0x28, 0xff, 0 | 235 | #define SR_IEEE_ADDR_4 0x28, 0xff, 0 |
| 234 | #define RG_IEEE_ADDR_5 (0x29) | 236 | #define RG_IEEE_ADDR_5 (0x29) |
| 235 | #define SR_IEEE_ADDR_5 0x29, 0xff, 0 | 237 | #define SR_IEEE_ADDR_5 0x29, 0xff, 0 |
| 236 | #define RG_IEEE_ADDR_6 (0x2a) | 238 | #define RG_IEEE_ADDR_6 (0x2a) |
| 237 | #define SR_IEEE_ADDR_6 0x2a, 0xff, 0 | 239 | #define SR_IEEE_ADDR_6 0x2a, 0xff, 0 |
| 238 | #define RG_IEEE_ADDR_7 (0x2b) | 240 | #define RG_IEEE_ADDR_7 (0x2b) |
| 239 | #define SR_IEEE_ADDR_7 0x2b, 0xff, 0 | 241 | #define SR_IEEE_ADDR_7 0x2b, 0xff, 0 |
| 240 | #define RG_XAH_CTRL_0 (0x2c) | 242 | #define RG_XAH_CTRL_0 (0x2c) |
| 241 | #define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 | 243 | #define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 |
| 242 | #define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 | 244 | #define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 |
| 243 | #define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 | 245 | #define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 |
| 244 | #define RG_CSMA_SEED_0 (0x2d) | 246 | #define RG_CSMA_SEED_0 (0x2d) |
| 245 | #define SR_CSMA_SEED_0 0x2d, 0xff, 0 | 247 | #define SR_CSMA_SEED_0 0x2d, 0xff, 0 |
| 246 | #define RG_CSMA_SEED_1 (0x2e) | 248 | #define RG_CSMA_SEED_1 (0x2e) |
| 247 | #define SR_CSMA_SEED_1 0x2e, 0x07, 0 | 249 | #define SR_CSMA_SEED_1 0x2e, 0x07, 0 |
| 248 | #define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 | 250 | #define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 |
| 249 | #define SR_AACK_DIS_ACK 0x2e, 0x10, 4 | 251 | #define SR_AACK_DIS_ACK 0x2e, 0x10, 4 |
| 250 | #define SR_AACK_SET_PD 0x2e, 0x20, 5 | 252 | #define SR_AACK_SET_PD 0x2e, 0x20, 5 |
| 251 | #define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 | 253 | #define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 |
| 252 | #define RG_CSMA_BE (0x2f) | 254 | #define RG_CSMA_BE (0x2f) |
| 253 | #define SR_MIN_BE 0x2f, 0x0f, 0 | 255 | #define SR_MIN_BE 0x2f, 0x0f, 0 |
| 254 | #define SR_MAX_BE 0x2f, 0xf0, 4 | 256 | #define SR_MAX_BE 0x2f, 0xf0, 4 |
| 255 | 257 | ||
| 256 | #define CMD_REG 0x80 | 258 | #define CMD_REG 0x80 |
| 257 | #define CMD_REG_MASK 0x3f | 259 | #define CMD_REG_MASK 0x3f |
| @@ -292,6 +294,8 @@ struct at86rf230_local { | |||
| 292 | #define STATE_BUSY_RX_AACK_NOCLK 0x1E | 294 | #define STATE_BUSY_RX_AACK_NOCLK 0x1E |
| 293 | #define STATE_TRANSITION_IN_PROGRESS 0x1F | 295 | #define STATE_TRANSITION_IN_PROGRESS 0x1F |
| 294 | 296 | ||
| 297 | #define TRX_STATE_MASK (0x1F) | ||
| 298 | |||
| 295 | #define AT86RF2XX_NUMREGS 0x3F | 299 | #define AT86RF2XX_NUMREGS 0x3F |
| 296 | 300 | ||
| 297 | static void | 301 | static void |
| @@ -336,6 +340,14 @@ at86rf230_write_subreg(struct at86rf230_local *lp, | |||
| 336 | return regmap_update_bits(lp->regmap, addr, mask, data << shift); | 340 | return regmap_update_bits(lp->regmap, addr, mask, data << shift); |
| 337 | } | 341 | } |
| 338 | 342 | ||
| 343 | static inline void | ||
| 344 | at86rf230_slp_tr_rising_edge(struct at86rf230_local *lp) | ||
| 345 | { | ||
| 346 | gpio_set_value(lp->slp_tr, 1); | ||
| 347 | udelay(1); | ||
| 348 | gpio_set_value(lp->slp_tr, 0); | ||
| 349 | } | ||
| 350 | |||
| 339 | static bool | 351 | static bool |
| 340 | at86rf230_reg_writeable(struct device *dev, unsigned int reg) | 352 | at86rf230_reg_writeable(struct device *dev, unsigned int reg) |
| 341 | { | 353 | { |
| @@ -509,7 +521,7 @@ at86rf230_async_state_assert(void *context) | |||
| 509 | struct at86rf230_state_change *ctx = context; | 521 | struct at86rf230_state_change *ctx = context; |
| 510 | struct at86rf230_local *lp = ctx->lp; | 522 | struct at86rf230_local *lp = ctx->lp; |
| 511 | const u8 *buf = ctx->buf; | 523 | const u8 *buf = ctx->buf; |
| 512 | const u8 trx_state = buf[1] & 0x1f; | 524 | const u8 trx_state = buf[1] & TRX_STATE_MASK; |
| 513 | 525 | ||
| 514 | /* Assert state change */ | 526 | /* Assert state change */ |
| 515 | if (trx_state != ctx->to_state) { | 527 | if (trx_state != ctx->to_state) { |
| @@ -609,11 +621,17 @@ at86rf230_async_state_delay(void *context) | |||
| 609 | switch (ctx->to_state) { | 621 | switch (ctx->to_state) { |
| 610 | case STATE_RX_AACK_ON: | 622 | case STATE_RX_AACK_ON: |
| 611 | tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC); | 623 | tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC); |
| 624 | /* state change from TRX_OFF to RX_AACK_ON to do a | ||
| 625 | * calibration, we need to reset the timeout for the | ||
| 626 | * next one. | ||
| 627 | */ | ||
| 628 | lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; | ||
| 612 | goto change; | 629 | goto change; |
| 630 | case STATE_TX_ARET_ON: | ||
| 613 | case STATE_TX_ON: | 631 | case STATE_TX_ON: |
| 614 | tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC); | 632 | tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC); |
| 615 | /* state change from TRX_OFF to TX_ON to do a | 633 | /* state change from TRX_OFF to TX_ON or ARET_ON to do |
| 616 | * calibration, we need to reset the timeout for the | 634 | * a calibration, we need to reset the timeout for the |
| 617 | * next one. | 635 | * next one. |
| 618 | */ | 636 | */ |
| 619 | lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; | 637 | lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; |
| @@ -667,7 +685,7 @@ at86rf230_async_state_change_start(void *context) | |||
| 667 | struct at86rf230_state_change *ctx = context; | 685 | struct at86rf230_state_change *ctx = context; |
| 668 | struct at86rf230_local *lp = ctx->lp; | 686 | struct at86rf230_local *lp = ctx->lp; |
| 669 | u8 *buf = ctx->buf; | 687 | u8 *buf = ctx->buf; |
| 670 | const u8 trx_state = buf[1] & 0x1f; | 688 | const u8 trx_state = buf[1] & TRX_STATE_MASK; |
| 671 | int rc; | 689 | int rc; |
| 672 | 690 | ||
| 673 | /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */ | 691 | /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */ |
| @@ -773,16 +791,6 @@ at86rf230_tx_on(void *context) | |||
| 773 | } | 791 | } |
| 774 | 792 | ||
| 775 | static void | 793 | static void |
| 776 | at86rf230_tx_trac_error(void *context) | ||
| 777 | { | ||
| 778 | struct at86rf230_state_change *ctx = context; | ||
| 779 | struct at86rf230_local *lp = ctx->lp; | ||
| 780 | |||
| 781 | at86rf230_async_state_change(lp, ctx, STATE_TX_ON, | ||
| 782 | at86rf230_tx_on, true); | ||
| 783 | } | ||
| 784 | |||
| 785 | static void | ||
| 786 | at86rf230_tx_trac_check(void *context) | 794 | at86rf230_tx_trac_check(void *context) |
| 787 | { | 795 | { |
| 788 | struct at86rf230_state_change *ctx = context; | 796 | struct at86rf230_state_change *ctx = context; |
| @@ -791,12 +799,12 @@ at86rf230_tx_trac_check(void *context) | |||
| 791 | const u8 trac = (buf[1] & 0xe0) >> 5; | 799 | const u8 trac = (buf[1] & 0xe0) >> 5; |
| 792 | 800 | ||
| 793 | /* If trac status is different than zero we need to do a state change | 801 | /* If trac status is different than zero we need to do a state change |
| 794 | * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver | 802 | * to STATE_FORCE_TRX_OFF then STATE_RX_AACK_ON to recover the |
| 795 | * state to TX_ON. | 803 | * transceiver. |
| 796 | */ | 804 | */ |
| 797 | if (trac) | 805 | if (trac) |
| 798 | at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, | 806 | at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, |
| 799 | at86rf230_tx_trac_error, true); | 807 | at86rf230_tx_on, true); |
| 800 | else | 808 | else |
| 801 | at86rf230_tx_on(context); | 809 | at86rf230_tx_on(context); |
| 802 | } | 810 | } |
| @@ -941,13 +949,18 @@ at86rf230_write_frame_complete(void *context) | |||
| 941 | u8 *buf = ctx->buf; | 949 | u8 *buf = ctx->buf; |
| 942 | int rc; | 950 | int rc; |
| 943 | 951 | ||
| 944 | buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; | ||
| 945 | buf[1] = STATE_BUSY_TX; | ||
| 946 | ctx->trx.len = 2; | 952 | ctx->trx.len = 2; |
| 947 | ctx->msg.complete = NULL; | 953 | |
| 948 | rc = spi_async(lp->spi, &ctx->msg); | 954 | if (gpio_is_valid(lp->slp_tr)) { |
| 949 | if (rc) | 955 | at86rf230_slp_tr_rising_edge(lp); |
| 950 | at86rf230_async_error(lp, ctx, rc); | 956 | } else { |
| 957 | buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; | ||
| 958 | buf[1] = STATE_BUSY_TX; | ||
| 959 | ctx->msg.complete = NULL; | ||
| 960 | rc = spi_async(lp->spi, &ctx->msg); | ||
| 961 | if (rc) | ||
| 962 | at86rf230_async_error(lp, ctx, rc); | ||
| 963 | } | ||
| 951 | } | 964 | } |
| 952 | 965 | ||
| 953 | static void | 966 | static void |
| @@ -993,12 +1006,21 @@ at86rf230_xmit_start(void *context) | |||
| 993 | * are in STATE_TX_ON. The path differs here, so we change | 1006 | * are in STATE_TX_ON. The path differs here, so we change |
| 994 | * the complete handler. | 1007 | * the complete handler. |
| 995 | */ | 1008 | */ |
| 996 | if (lp->tx_aret) | 1009 | if (lp->tx_aret) { |
| 997 | at86rf230_async_state_change(lp, ctx, STATE_TX_ON, | 1010 | if (lp->is_tx_from_off) { |
| 998 | at86rf230_xmit_tx_on, false); | 1011 | lp->is_tx_from_off = false; |
| 999 | else | 1012 | at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, |
| 1013 | at86rf230_xmit_tx_on, | ||
| 1014 | false); | ||
| 1015 | } else { | ||
| 1016 | at86rf230_async_state_change(lp, ctx, STATE_TX_ON, | ||
| 1017 | at86rf230_xmit_tx_on, | ||
| 1018 | false); | ||
| 1019 | } | ||
| 1020 | } else { | ||
| 1000 | at86rf230_async_state_change(lp, ctx, STATE_TX_ON, | 1021 | at86rf230_async_state_change(lp, ctx, STATE_TX_ON, |
| 1001 | at86rf230_write_frame, false); | 1022 | at86rf230_write_frame, false); |
| 1023 | } | ||
| 1002 | } | 1024 | } |
| 1003 | 1025 | ||
| 1004 | static int | 1026 | static int |
| @@ -1017,11 +1039,13 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) | |||
| 1017 | * to TX_ON, the lp->cal_timeout should be reinit by state_delay | 1039 | * to TX_ON, the lp->cal_timeout should be reinit by state_delay |
| 1018 | * function then to start in the next 5 minutes. | 1040 | * function then to start in the next 5 minutes. |
| 1019 | */ | 1041 | */ |
| 1020 | if (time_is_before_jiffies(lp->cal_timeout)) | 1042 | if (time_is_before_jiffies(lp->cal_timeout)) { |
| 1043 | lp->is_tx_from_off = true; | ||
| 1021 | at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, | 1044 | at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, |
| 1022 | at86rf230_xmit_start, false); | 1045 | at86rf230_xmit_start, false); |
| 1023 | else | 1046 | } else { |
| 1024 | at86rf230_xmit_start(ctx); | 1047 | at86rf230_xmit_start(ctx); |
| 1048 | } | ||
| 1025 | 1049 | ||
| 1026 | return 0; | 1050 | return 0; |
| 1027 | } | 1051 | } |
| @@ -1037,9 +1061,6 @@ at86rf230_ed(struct ieee802154_hw *hw, u8 *level) | |||
| 1037 | static int | 1061 | static int |
| 1038 | at86rf230_start(struct ieee802154_hw *hw) | 1062 | at86rf230_start(struct ieee802154_hw *hw) |
| 1039 | { | 1063 | { |
| 1040 | struct at86rf230_local *lp = hw->priv; | ||
| 1041 | |||
| 1042 | lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT; | ||
| 1043 | return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON); | 1064 | return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON); |
| 1044 | } | 1065 | } |
| 1045 | 1066 | ||
| @@ -1673,6 +1694,7 @@ static int at86rf230_probe(struct spi_device *spi) | |||
| 1673 | lp = hw->priv; | 1694 | lp = hw->priv; |
| 1674 | lp->hw = hw; | 1695 | lp->hw = hw; |
| 1675 | lp->spi = spi; | 1696 | lp->spi = spi; |
| 1697 | lp->slp_tr = slp_tr; | ||
| 1676 | hw->parent = &spi->dev; | 1698 | hw->parent = &spi->dev; |
| 1677 | hw->vif_data_size = sizeof(*lp); | 1699 | hw->vif_data_size = sizeof(*lp); |
| 1678 | ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); | 1700 | ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); |
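In the at86rf230 changes above, a valid SLP_TR GPIO lets the driver start transmission with a rising edge on that pin instead of writing STATE_BUSY_TX over SPI, saving one bus transfer in the hot path. A stripped-down sketch of that optional-GPIO fallback; the callback stands in for the SPI command and is not part of the driver.

#include <linux/delay.h>
#include <linux/gpio.h>

/* Sketch: when the optional trigger GPIO is wired up, a short rising edge
 * starts the transmission; otherwise fall back to a bus command. */
static void example_kick_tx(int trigger_gpio,
                            void (*spi_start_tx)(void *ctx), void *ctx)
{
        if (gpio_is_valid(trigger_gpio)) {
                gpio_set_value(trigger_gpio, 1);
                udelay(1);              /* short pulse, as in the driver above */
                gpio_set_value(trigger_gpio, 0);
        } else {
                spi_start_tx(ctx);
        }
}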
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b227a13f6473..9f59f17dc317 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
| @@ -599,10 +599,18 @@ static int macvlan_open(struct net_device *dev) | |||
| 599 | goto del_unicast; | 599 | goto del_unicast; |
| 600 | } | 600 | } |
| 601 | 601 | ||
| 602 | if (dev->flags & IFF_PROMISC) { | ||
| 603 | err = dev_set_promiscuity(lowerdev, 1); | ||
| 604 | if (err < 0) | ||
| 605 | goto clear_multi; | ||
| 606 | } | ||
| 607 | |||
| 602 | hash_add: | 608 | hash_add: |
| 603 | macvlan_hash_add(vlan); | 609 | macvlan_hash_add(vlan); |
| 604 | return 0; | 610 | return 0; |
| 605 | 611 | ||
| 612 | clear_multi: | ||
| 613 | dev_set_allmulti(lowerdev, -1); | ||
| 606 | del_unicast: | 614 | del_unicast: |
| 607 | dev_uc_del(lowerdev, dev->dev_addr); | 615 | dev_uc_del(lowerdev, dev->dev_addr); |
| 608 | out: | 616 | out: |
| @@ -638,6 +646,9 @@ static int macvlan_stop(struct net_device *dev) | |||
| 638 | if (dev->flags & IFF_ALLMULTI) | 646 | if (dev->flags & IFF_ALLMULTI) |
| 639 | dev_set_allmulti(lowerdev, -1); | 647 | dev_set_allmulti(lowerdev, -1); |
| 640 | 648 | ||
| 649 | if (dev->flags & IFF_PROMISC) | ||
| 650 | dev_set_promiscuity(lowerdev, -1); | ||
| 651 | |||
| 641 | dev_uc_del(lowerdev, dev->dev_addr); | 652 | dev_uc_del(lowerdev, dev->dev_addr); |
| 642 | 653 | ||
| 643 | hash_del: | 654 | hash_del: |
| @@ -696,6 +707,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) | |||
| 696 | if (dev->flags & IFF_UP) { | 707 | if (dev->flags & IFF_UP) { |
| 697 | if (change & IFF_ALLMULTI) | 708 | if (change & IFF_ALLMULTI) |
| 698 | dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); | 709 | dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); |
| 710 | if (change & IFF_PROMISC) | ||
| 711 | dev_set_promiscuity(lowerdev, | ||
| 712 | dev->flags & IFF_PROMISC ? 1 : -1); | ||
| 713 | |||
| 699 | } | 714 | } |
| 700 | } | 715 | } |
| 701 | 716 | ||
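The macvlan change mirrors the existing IFF_ALLMULTI handling for IFF_PROMISC: the lower device's promiscuity count is bumped when the macvlan goes up with the flag set (or the flag is turned on) and dropped on stop or when the flag is cleared. A generic sketch of propagating such flags from an upper to a lower device; the private struct is a placeholder.

#include <linux/netdevice.h>

struct example_upper_priv {
        struct net_device *lowerdev;
};

/* Sketch: keep the lower device's allmulti/promiscuity refcounts in step
 * with the upper device's flags, from ndo_change_rx_flags(). */
static void example_change_rx_flags(struct net_device *dev, int change)
{
        struct example_upper_priv *priv = netdev_priv(dev);
        struct net_device *lowerdev = priv->lowerdev;

        if (!(dev->flags & IFF_UP))
                return;

        if (change & IFF_ALLMULTI)
                dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
        if (change & IFF_PROMISC)
                dev_set_promiscuity(lowerdev,
                                    dev->flags & IFF_PROMISC ? 1 : -1);
}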
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 8fadaa14b9f0..70641d2c0429 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
| @@ -27,6 +27,7 @@ config AMD_PHY | |||
| 27 | config AMD_XGBE_PHY | 27 | config AMD_XGBE_PHY |
| 28 | tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs" | 28 | tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs" |
| 29 | depends on (OF || ACPI) && HAS_IOMEM | 29 | depends on (OF || ACPI) && HAS_IOMEM |
| 30 | depends on ARM64 || COMPILE_TEST | ||
| 30 | ---help--- | 31 | ---help--- |
| 31 | Currently supports the AMD 10GbE PHY | 32 | Currently supports the AMD 10GbE PHY |
| 32 | 33 | ||
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index fb276f64cd64..34a75cba3b73 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c | |||
| @@ -755,6 +755,45 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev, | |||
| 755 | return ret; | 755 | return ret; |
| 756 | } | 756 | } |
| 757 | 757 | ||
| 758 | static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev) | ||
| 759 | { | ||
| 760 | if (phydev->autoneg == AUTONEG_ENABLE) { | ||
| 761 | if (phydev->advertising & ADVERTISED_10000baseKR_Full) | ||
| 762 | return true; | ||
| 763 | } else { | ||
| 764 | if (phydev->speed == SPEED_10000) | ||
| 765 | return true; | ||
| 766 | } | ||
| 767 | |||
| 768 | return false; | ||
| 769 | } | ||
| 770 | |||
| 771 | static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev) | ||
| 772 | { | ||
| 773 | if (phydev->autoneg == AUTONEG_ENABLE) { | ||
| 774 | if (phydev->advertising & ADVERTISED_2500baseX_Full) | ||
| 775 | return true; | ||
| 776 | } else { | ||
| 777 | if (phydev->speed == SPEED_2500) | ||
| 778 | return true; | ||
| 779 | } | ||
| 780 | |||
| 781 | return false; | ||
| 782 | } | ||
| 783 | |||
| 784 | static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev) | ||
| 785 | { | ||
| 786 | if (phydev->autoneg == AUTONEG_ENABLE) { | ||
| 787 | if (phydev->advertising & ADVERTISED_1000baseKX_Full) | ||
| 788 | return true; | ||
| 789 | } else { | ||
| 790 | if (phydev->speed == SPEED_1000) | ||
| 791 | return true; | ||
| 792 | } | ||
| 793 | |||
| 794 | return false; | ||
| 795 | } | ||
| 796 | |||
| 758 | static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable, | 797 | static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable, |
| 759 | bool restart) | 798 | bool restart) |
| 760 | { | 799 | { |
| @@ -1235,11 +1274,11 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev) | |||
| 1235 | /* Set initial mode - call the mode setting routines | 1274 | /* Set initial mode - call the mode setting routines |
| 1236 | * directly to insure we are properly configured | 1275 | * directly to insure we are properly configured |
| 1237 | */ | 1276 | */ |
| 1238 | if (phydev->advertising & SUPPORTED_10000baseKR_Full) | 1277 | if (amd_xgbe_phy_use_xgmii_mode(phydev)) |
| 1239 | ret = amd_xgbe_phy_xgmii_mode(phydev); | 1278 | ret = amd_xgbe_phy_xgmii_mode(phydev); |
| 1240 | else if (phydev->advertising & SUPPORTED_1000baseKX_Full) | 1279 | else if (amd_xgbe_phy_use_gmii_mode(phydev)) |
| 1241 | ret = amd_xgbe_phy_gmii_mode(phydev); | 1280 | ret = amd_xgbe_phy_gmii_mode(phydev); |
| 1242 | else if (phydev->advertising & SUPPORTED_2500baseX_Full) | 1281 | else if (amd_xgbe_phy_use_gmii_2500_mode(phydev)) |
| 1243 | ret = amd_xgbe_phy_gmii_2500_mode(phydev); | 1282 | ret = amd_xgbe_phy_gmii_2500_mode(phydev); |
| 1244 | else | 1283 | else |
| 1245 | ret = -EINVAL; | 1284 | ret = -EINVAL; |
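The amd-xgbe-phy helpers above consult the advertised modes only when autonegotiation is enabled and fall back to the forced speed when it is not, instead of always reading the advertising mask. A generic sketch of that decision; the interface choice and names are illustrative, not the driver's own logic.

#include <linux/ethtool.h>
#include <linux/phy.h>

/* Sketch: pick the MAC interface from either the advertised modes
 * (autoneg on) or the forced speed (autoneg off). */
static phy_interface_t example_pick_interface(struct phy_device *phydev)
{
        if (phydev->autoneg == AUTONEG_ENABLE) {
                if (phydev->advertising & ADVERTISED_10000baseKR_Full)
                        return PHY_INTERFACE_MODE_XGMII;
                if (phydev->advertising & ADVERTISED_1000baseKX_Full)
                        return PHY_INTERFACE_MODE_GMII;
        } else {
                if (phydev->speed == SPEED_10000)
                        return PHY_INTERFACE_MODE_XGMII;
                if (phydev->speed == SPEED_1000)
                        return PHY_INTERFACE_MODE_GMII;
        }

        return PHY_INTERFACE_MODE_NA;
}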
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 64c74c6a4828..b5dc59de094e 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c | |||
| @@ -404,7 +404,7 @@ static struct phy_driver bcm7xxx_driver[] = { | |||
| 404 | .name = "Broadcom BCM7425", | 404 | .name = "Broadcom BCM7425", |
| 405 | .features = PHY_GBIT_FEATURES | | 405 | .features = PHY_GBIT_FEATURES | |
| 406 | SUPPORTED_Pause | SUPPORTED_Asym_Pause, | 406 | SUPPORTED_Pause | SUPPORTED_Asym_Pause, |
| 407 | .flags = 0, | 407 | .flags = PHY_IS_INTERNAL, |
| 408 | .config_init = bcm7xxx_config_init, | 408 | .config_init = bcm7xxx_config_init, |
| 409 | .config_aneg = genphy_config_aneg, | 409 | .config_aneg = genphy_config_aneg, |
| 410 | .read_status = genphy_read_status, | 410 | .read_status = genphy_read_status, |
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 496e02f961d3..00cb41e71312 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | #define PSF_TX 0x1000 | 47 | #define PSF_TX 0x1000 |
| 48 | #define EXT_EVENT 1 | 48 | #define EXT_EVENT 1 |
| 49 | #define CAL_EVENT 7 | 49 | #define CAL_EVENT 7 |
| 50 | #define CAL_TRIGGER 7 | 50 | #define CAL_TRIGGER 1 |
| 51 | #define DP83640_N_PINS 12 | 51 | #define DP83640_N_PINS 12 |
| 52 | 52 | ||
| 53 | #define MII_DP83640_MICR 0x11 | 53 | #define MII_DP83640_MICR 0x11 |
| @@ -496,7 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp, | |||
| 496 | else | 496 | else |
| 497 | evnt |= EVNT_RISE; | 497 | evnt |= EVNT_RISE; |
| 498 | } | 498 | } |
| 499 | mutex_lock(&clock->extreg_lock); | ||
| 499 | ext_write(0, phydev, PAGE5, PTP_EVNT, evnt); | 500 | ext_write(0, phydev, PAGE5, PTP_EVNT, evnt); |
| 501 | mutex_unlock(&clock->extreg_lock); | ||
| 500 | return 0; | 502 | return 0; |
| 501 | 503 | ||
| 502 | case PTP_CLK_REQ_PEROUT: | 504 | case PTP_CLK_REQ_PEROUT: |
| @@ -532,6 +534,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F }; | |||
| 532 | 534 | ||
| 533 | static void enable_status_frames(struct phy_device *phydev, bool on) | 535 | static void enable_status_frames(struct phy_device *phydev, bool on) |
| 534 | { | 536 | { |
| 537 | struct dp83640_private *dp83640 = phydev->priv; | ||
| 538 | struct dp83640_clock *clock = dp83640->clock; | ||
| 535 | u16 cfg0 = 0, ver; | 539 | u16 cfg0 = 0, ver; |
| 536 | 540 | ||
| 537 | if (on) | 541 | if (on) |
| @@ -539,9 +543,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on) | |||
| 539 | 543 | ||
| 540 | ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT; | 544 | ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT; |
| 541 | 545 | ||
| 546 | mutex_lock(&clock->extreg_lock); | ||
| 547 | |||
| 542 | ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0); | 548 | ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0); |
| 543 | ext_write(0, phydev, PAGE6, PSF_CFG1, ver); | 549 | ext_write(0, phydev, PAGE6, PSF_CFG1, ver); |
| 544 | 550 | ||
| 551 | mutex_unlock(&clock->extreg_lock); | ||
| 552 | |||
| 545 | if (!phydev->attached_dev) { | 553 | if (!phydev->attached_dev) { |
| 546 | pr_warn("expected to find an attached netdevice\n"); | 554 | pr_warn("expected to find an attached netdevice\n"); |
| 547 | return; | 555 | return; |
| @@ -838,7 +846,7 @@ static void decode_rxts(struct dp83640_private *dp83640, | |||
| 838 | list_del_init(&rxts->list); | 846 | list_del_init(&rxts->list); |
| 839 | phy2rxts(phy_rxts, rxts); | 847 | phy2rxts(phy_rxts, rxts); |
| 840 | 848 | ||
| 841 | spin_lock_irqsave(&dp83640->rx_queue.lock, flags); | 849 | spin_lock(&dp83640->rx_queue.lock); |
| 842 | skb_queue_walk(&dp83640->rx_queue, skb) { | 850 | skb_queue_walk(&dp83640->rx_queue, skb) { |
| 843 | struct dp83640_skb_info *skb_info; | 851 | struct dp83640_skb_info *skb_info; |
| 844 | 852 | ||
| @@ -853,7 +861,7 @@ static void decode_rxts(struct dp83640_private *dp83640, | |||
| 853 | break; | 861 | break; |
| 854 | } | 862 | } |
| 855 | } | 863 | } |
| 856 | spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags); | 864 | spin_unlock(&dp83640->rx_queue.lock); |
| 857 | 865 | ||
| 858 | if (!shhwtstamps) | 866 | if (!shhwtstamps) |
| 859 | list_add_tail(&rxts->list, &dp83640->rxts); | 867 | list_add_tail(&rxts->list, &dp83640->rxts); |
| @@ -1173,11 +1181,18 @@ static int dp83640_config_init(struct phy_device *phydev) | |||
| 1173 | 1181 | ||
| 1174 | if (clock->chosen && !list_empty(&clock->phylist)) | 1182 | if (clock->chosen && !list_empty(&clock->phylist)) |
| 1175 | recalibrate(clock); | 1183 | recalibrate(clock); |
| 1176 | else | 1184 | else { |
| 1185 | mutex_lock(&clock->extreg_lock); | ||
| 1177 | enable_broadcast(phydev, clock->page, 1); | 1186 | enable_broadcast(phydev, clock->page, 1); |
| 1187 | mutex_unlock(&clock->extreg_lock); | ||
| 1188 | } | ||
| 1178 | 1189 | ||
| 1179 | enable_status_frames(phydev, true); | 1190 | enable_status_frames(phydev, true); |
| 1191 | |||
| 1192 | mutex_lock(&clock->extreg_lock); | ||
| 1180 | ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); | 1193 | ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); |
| 1194 | mutex_unlock(&clock->extreg_lock); | ||
| 1195 | |||
| 1181 | return 0; | 1196 | return 0; |
| 1182 | } | 1197 | } |
| 1183 | 1198 | ||
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 49ce7ece5af3..53d18150f4e2 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c | |||
| @@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) | |||
| 80 | * assume the pin serves as pull-up. If direction is | 80 | * assume the pin serves as pull-up. If direction is |
| 81 | * output, the default value is high. | 81 | * output, the default value is high. |
| 82 | */ | 82 | */ |
| 83 | gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low); | 83 | gpio_set_value_cansleep(bitbang->mdo, |
| 84 | 1 ^ bitbang->mdo_active_low); | ||
| 84 | return; | 85 | return; |
| 85 | } | 86 | } |
| 86 | 87 | ||
| @@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) | |||
| 96 | struct mdio_gpio_info *bitbang = | 97 | struct mdio_gpio_info *bitbang = |
| 97 | container_of(ctrl, struct mdio_gpio_info, ctrl); | 98 | container_of(ctrl, struct mdio_gpio_info, ctrl); |
| 98 | 99 | ||
| 99 | return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low; | 100 | return gpio_get_value_cansleep(bitbang->mdio) ^ |
| 101 | bitbang->mdio_active_low; | ||
| 100 | } | 102 | } |
| 101 | 103 | ||
| 102 | static void mdio_set(struct mdiobb_ctrl *ctrl, int what) | 104 | static void mdio_set(struct mdiobb_ctrl *ctrl, int what) |
| @@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) | |||
| 105 | container_of(ctrl, struct mdio_gpio_info, ctrl); | 107 | container_of(ctrl, struct mdio_gpio_info, ctrl); |
| 106 | 108 | ||
| 107 | if (bitbang->mdo) | 109 | if (bitbang->mdo) |
| 108 | gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low); | 110 | gpio_set_value_cansleep(bitbang->mdo, |
| 111 | what ^ bitbang->mdo_active_low); | ||
| 109 | else | 112 | else |
| 110 | gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low); | 113 | gpio_set_value_cansleep(bitbang->mdio, |
| 114 | what ^ bitbang->mdio_active_low); | ||
| 111 | } | 115 | } |
| 112 | 116 | ||
| 113 | static void mdc_set(struct mdiobb_ctrl *ctrl, int what) | 117 | static void mdc_set(struct mdiobb_ctrl *ctrl, int what) |
| @@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) | |||
| 115 | struct mdio_gpio_info *bitbang = | 119 | struct mdio_gpio_info *bitbang = |
| 116 | container_of(ctrl, struct mdio_gpio_info, ctrl); | 120 | container_of(ctrl, struct mdio_gpio_info, ctrl); |
| 117 | 121 | ||
| 118 | gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low); | 122 | gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low); |
| 119 | } | 123 | } |
| 120 | 124 | ||
| 121 | static struct mdiobb_ops mdio_gpio_ops = { | 125 | static struct mdiobb_ops mdio_gpio_ops = { |
| @@ -164,7 +168,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, | |||
| 164 | if (!new_bus->irq[i]) | 168 | if (!new_bus->irq[i]) |
| 165 | new_bus->irq[i] = PHY_POLL; | 169 | new_bus->irq[i] = PHY_POLL; |
| 166 | 170 | ||
| 167 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); | 171 | if (bus_id != -1) |
| 172 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); | ||
| 173 | else | ||
| 174 | strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE); | ||
| 168 | 175 | ||
| 169 | if (devm_gpio_request(dev, bitbang->mdc, "mdc")) | 176 | if (devm_gpio_request(dev, bitbang->mdc, "mdc")) |
| 170 | goto out_free_bus; | 177 | goto out_free_bus; |
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 1a87a585e74d..66edd99bc302 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c | |||
| @@ -12,33 +12,30 @@ | |||
| 12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| 13 | #include <linux/phy.h> | 13 | #include <linux/phy.h> |
| 14 | #include <linux/mdio-mux.h> | 14 | #include <linux/mdio-mux.h> |
| 15 | #include <linux/of_gpio.h> | 15 | #include <linux/gpio/consumer.h> |
| 16 | 16 | ||
| 17 | #define DRV_VERSION "1.1" | 17 | #define DRV_VERSION "1.1" |
| 18 | #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" | 18 | #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" |
| 19 | 19 | ||
| 20 | #define MDIO_MUX_GPIO_MAX_BITS 8 | ||
| 21 | |||
| 22 | struct mdio_mux_gpio_state { | 20 | struct mdio_mux_gpio_state { |
| 23 | struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS]; | 21 | struct gpio_descs *gpios; |
| 24 | unsigned int num_gpios; | ||
| 25 | void *mux_handle; | 22 | void *mux_handle; |
| 26 | }; | 23 | }; |
| 27 | 24 | ||
| 28 | static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, | 25 | static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, |
| 29 | void *data) | 26 | void *data) |
| 30 | { | 27 | { |
| 31 | int values[MDIO_MUX_GPIO_MAX_BITS]; | ||
| 32 | unsigned int n; | ||
| 33 | struct mdio_mux_gpio_state *s = data; | 28 | struct mdio_mux_gpio_state *s = data; |
| 29 | int values[s->gpios->ndescs]; | ||
| 30 | unsigned int n; | ||
| 34 | 31 | ||
| 35 | if (current_child == desired_child) | 32 | if (current_child == desired_child) |
| 36 | return 0; | 33 | return 0; |
| 37 | 34 | ||
| 38 | for (n = 0; n < s->num_gpios; n++) { | 35 | for (n = 0; n < s->gpios->ndescs; n++) |
| 39 | values[n] = (desired_child >> n) & 1; | 36 | values[n] = (desired_child >> n) & 1; |
| 40 | } | 37 | |
| 41 | gpiod_set_array_cansleep(s->num_gpios, s->gpio, values); | 38 | gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values); |
| 42 | 39 | ||
| 43 | return 0; | 40 | return 0; |
| 44 | } | 41 | } |
| @@ -46,56 +43,33 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, | |||
| 46 | static int mdio_mux_gpio_probe(struct platform_device *pdev) | 43 | static int mdio_mux_gpio_probe(struct platform_device *pdev) |
| 47 | { | 44 | { |
| 48 | struct mdio_mux_gpio_state *s; | 45 | struct mdio_mux_gpio_state *s; |
| 49 | int num_gpios; | ||
| 50 | unsigned int n; | ||
| 51 | int r; | 46 | int r; |
| 52 | 47 | ||
| 53 | if (!pdev->dev.of_node) | ||
| 54 | return -ENODEV; | ||
| 55 | |||
| 56 | num_gpios = of_gpio_count(pdev->dev.of_node); | ||
| 57 | if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS) | ||
| 58 | return -ENODEV; | ||
| 59 | |||
| 60 | s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); | 48 | s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); |
| 61 | if (!s) | 49 | if (!s) |
| 62 | return -ENOMEM; | 50 | return -ENOMEM; |
| 63 | 51 | ||
| 64 | s->num_gpios = num_gpios; | 52 | s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); |
| 65 | 53 | if (IS_ERR(s->gpios)) | |
| 66 | for (n = 0; n < num_gpios; ) { | 54 | return PTR_ERR(s->gpios); |
| 67 | struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n, | ||
| 68 | GPIOD_OUT_LOW); | ||
| 69 | if (IS_ERR(gpio)) { | ||
| 70 | r = PTR_ERR(gpio); | ||
| 71 | goto err; | ||
| 72 | } | ||
| 73 | s->gpio[n] = gpio; | ||
| 74 | n++; | ||
| 75 | } | ||
| 76 | 55 | ||
| 77 | r = mdio_mux_init(&pdev->dev, | 56 | r = mdio_mux_init(&pdev->dev, |
| 78 | mdio_mux_gpio_switch_fn, &s->mux_handle, s); | 57 | mdio_mux_gpio_switch_fn, &s->mux_handle, s); |
| 79 | 58 | ||
| 80 | if (r == 0) { | 59 | if (r != 0) { |
| 81 | pdev->dev.platform_data = s; | 60 | gpiod_put_array(s->gpios); |
| 82 | return 0; | 61 | return r; |
| 83 | } | ||
| 84 | err: | ||
| 85 | while (n) { | ||
| 86 | n--; | ||
| 87 | gpiod_put(s->gpio[n]); | ||
| 88 | } | 62 | } |
| 89 | return r; | 63 | |
| 64 | pdev->dev.platform_data = s; | ||
| 65 | return 0; | ||
| 90 | } | 66 | } |
| 91 | 67 | ||
| 92 | static int mdio_mux_gpio_remove(struct platform_device *pdev) | 68 | static int mdio_mux_gpio_remove(struct platform_device *pdev) |
| 93 | { | 69 | { |
| 94 | unsigned int n; | ||
| 95 | struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); | 70 | struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); |
| 96 | mdio_mux_uninit(s->mux_handle); | 71 | mdio_mux_uninit(s->mux_handle); |
| 97 | for (n = 0; n < s->num_gpios; n++) | 72 | gpiod_put_array(s->gpios); |
| 98 | gpiod_put(s->gpio[n]); | ||
| 99 | return 0; | 73 | return 0; |
| 100 | } | 74 | } |
| 101 | 75 | ||
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 1190fd8f0088..ebdc357c5131 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
| @@ -548,7 +548,8 @@ static int kszphy_probe(struct phy_device *phydev) | |||
| 548 | } | 548 | } |
| 549 | 549 | ||
| 550 | clk = devm_clk_get(&phydev->dev, "rmii-ref"); | 550 | clk = devm_clk_get(&phydev->dev, "rmii-ref"); |
| 551 | if (!IS_ERR(clk)) { | 551 | /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */ |
| 552 | if (!IS_ERR_OR_NULL(clk)) { | ||
| 552 | unsigned long rate = clk_get_rate(clk); | 553 | unsigned long rate = clk_get_rate(clk); |
| 553 | bool rmii_ref_clk_sel_25_mhz; | 554 | bool rmii_ref_clk_sel_25_mhz; |
| 554 | 555 | ||
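The micrel hunk above switches to IS_ERR_OR_NULL() because devm_clk_get() can return NULL when the clock framework is compiled out, and IS_ERR() alone would let that NULL reach clk_get_rate(). A minimal stand-alone illustration, using simplified stand-ins for the kernel's error-pointer macros (hypothetical, not the real headers):

#include <stdio.h>

/* Stand-ins for the kernel helpers (simplified for illustration). */
#define IS_ERR(p)          ((unsigned long)(p) >= (unsigned long)-4095)
#define IS_ERR_OR_NULL(p)  (!(p) || IS_ERR(p))

int main(void)
{
	void *clk = NULL;	/* what devm_clk_get() may hand back without CONFIG_HAVE_CLK */

	printf("IS_ERR: %d\n", IS_ERR(clk));			/* 0 -> NULL would slip through */
	printf("IS_ERR_OR_NULL: %d\n", IS_ERR_OR_NULL(clk));	/* 1 -> the rate read is skipped */
	return 0;
}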
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 52cd8db2c57d..47cd578052fc 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -742,6 +742,9 @@ EXPORT_SYMBOL(phy_stop); | |||
| 742 | */ | 742 | */ |
| 743 | void phy_start(struct phy_device *phydev) | 743 | void phy_start(struct phy_device *phydev) |
| 744 | { | 744 | { |
| 745 | bool do_resume = false; | ||
| 746 | int err = 0; | ||
| 747 | |||
| 745 | mutex_lock(&phydev->lock); | 748 | mutex_lock(&phydev->lock); |
| 746 | 749 | ||
| 747 | switch (phydev->state) { | 750 | switch (phydev->state) { |
| @@ -752,11 +755,22 @@ void phy_start(struct phy_device *phydev) | |||
| 752 | phydev->state = PHY_UP; | 755 | phydev->state = PHY_UP; |
| 753 | break; | 756 | break; |
| 754 | case PHY_HALTED: | 757 | case PHY_HALTED: |
| 758 | /* make sure interrupts are re-enabled for the PHY */ | ||
| 759 | err = phy_enable_interrupts(phydev); | ||
| 760 | if (err < 0) | ||
| 761 | break; | ||
| 762 | |||
| 755 | phydev->state = PHY_RESUMING; | 763 | phydev->state = PHY_RESUMING; |
| 764 | do_resume = true; | ||
| 765 | break; | ||
| 756 | default: | 766 | default: |
| 757 | break; | 767 | break; |
| 758 | } | 768 | } |
| 759 | mutex_unlock(&phydev->lock); | 769 | mutex_unlock(&phydev->lock); |
| 770 | |||
| 771 | /* if phy was suspended, bring the physical link up again */ | ||
| 772 | if (do_resume) | ||
| 773 | phy_resume(phydev); | ||
| 760 | } | 774 | } |
| 761 | EXPORT_SYMBOL(phy_start); | 775 | EXPORT_SYMBOL(phy_start); |
| 762 | 776 | ||
| @@ -769,7 +783,7 @@ void phy_state_machine(struct work_struct *work) | |||
| 769 | struct delayed_work *dwork = to_delayed_work(work); | 783 | struct delayed_work *dwork = to_delayed_work(work); |
| 770 | struct phy_device *phydev = | 784 | struct phy_device *phydev = |
| 771 | container_of(dwork, struct phy_device, state_queue); | 785 | container_of(dwork, struct phy_device, state_queue); |
| 772 | bool needs_aneg = false, do_suspend = false, do_resume = false; | 786 | bool needs_aneg = false, do_suspend = false; |
| 773 | int err = 0; | 787 | int err = 0; |
| 774 | 788 | ||
| 775 | mutex_lock(&phydev->lock); | 789 | mutex_lock(&phydev->lock); |
| @@ -888,14 +902,6 @@ void phy_state_machine(struct work_struct *work) | |||
| 888 | } | 902 | } |
| 889 | break; | 903 | break; |
| 890 | case PHY_RESUMING: | 904 | case PHY_RESUMING: |
| 891 | err = phy_clear_interrupt(phydev); | ||
| 892 | if (err) | ||
| 893 | break; | ||
| 894 | |||
| 895 | err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); | ||
| 896 | if (err) | ||
| 897 | break; | ||
| 898 | |||
| 899 | if (AUTONEG_ENABLE == phydev->autoneg) { | 905 | if (AUTONEG_ENABLE == phydev->autoneg) { |
| 900 | err = phy_aneg_done(phydev); | 906 | err = phy_aneg_done(phydev); |
| 901 | if (err < 0) | 907 | if (err < 0) |
| @@ -933,7 +939,6 @@ void phy_state_machine(struct work_struct *work) | |||
| 933 | } | 939 | } |
| 934 | phydev->adjust_link(phydev->attached_dev); | 940 | phydev->adjust_link(phydev->attached_dev); |
| 935 | } | 941 | } |
| 936 | do_resume = true; | ||
| 937 | break; | 942 | break; |
| 938 | } | 943 | } |
| 939 | 944 | ||
| @@ -943,8 +948,6 @@ void phy_state_machine(struct work_struct *work) | |||
| 943 | err = phy_start_aneg(phydev); | 948 | err = phy_start_aneg(phydev); |
| 944 | else if (do_suspend) | 949 | else if (do_suspend) |
| 945 | phy_suspend(phydev); | 950 | phy_suspend(phydev); |
| 946 | else if (do_resume) | ||
| 947 | phy_resume(phydev); | ||
| 948 | 951 | ||
| 949 | if (err < 0) | 952 | if (err < 0) |
| 950 | phy_error(phydev); | 953 | phy_error(phydev); |
| @@ -1053,13 +1056,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
| 1053 | { | 1056 | { |
| 1054 | /* According to 802.3az,the EEE is supported only in full duplex-mode. | 1057 | /* According to 802.3az,the EEE is supported only in full duplex-mode. |
| 1055 | * Also EEE feature is active when core is operating with MII, GMII | 1058 | * Also EEE feature is active when core is operating with MII, GMII |
| 1056 | * or RGMII. Internal PHYs are also allowed to proceed and should | 1059 | * or RGMII (all kinds). Internal PHYs are also allowed to proceed and |
| 1057 | * return an error if they do not support EEE. | 1060 | * should return an error if they do not support EEE. |
| 1058 | */ | 1061 | */ |
| 1059 | if ((phydev->duplex == DUPLEX_FULL) && | 1062 | if ((phydev->duplex == DUPLEX_FULL) && |
| 1060 | ((phydev->interface == PHY_INTERFACE_MODE_MII) || | 1063 | ((phydev->interface == PHY_INTERFACE_MODE_MII) || |
| 1061 | (phydev->interface == PHY_INTERFACE_MODE_GMII) || | 1064 | (phydev->interface == PHY_INTERFACE_MODE_GMII) || |
| 1062 | (phydev->interface == PHY_INTERFACE_MODE_RGMII) || | 1065 | (phydev->interface >= PHY_INTERFACE_MODE_RGMII && |
| 1066 | phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) || | ||
| 1063 | phy_is_internal(phydev))) { | 1067 | phy_is_internal(phydev))) { |
| 1064 | int eee_lp, eee_cap, eee_adv; | 1068 | int eee_lp, eee_cap, eee_adv; |
| 1065 | u32 lp, cap, adv; | 1069 | u32 lp, cap, adv; |
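The phy_init_eee() hunk above widens the RGMII test from a single mode to the whole RGMII range of the interface enum; the range test only works because the RGMII variants are declared consecutively. A small sketch of the idiom with a stand-in enum (names are illustrative, not the kernel's phy_interface_t):

#include <stdio.h>

/* Stand-in for phy_interface_t: the RGMII variants are consecutive. */
enum phy_mode { MODE_MII, MODE_GMII, MODE_RGMII, MODE_RGMII_ID,
		MODE_RGMII_RXID, MODE_RGMII_TXID, MODE_SGMII };

static int is_rgmii(enum phy_mode m)
{
	/* Covers plain RGMII plus all delay variants in one comparison. */
	return m >= MODE_RGMII && m <= MODE_RGMII_TXID;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_rgmii(MODE_RGMII_RXID),	/* 1 */
	       is_rgmii(MODE_GMII),		/* 0 */
	       is_rgmii(MODE_SGMII));		/* 0 */
	return 0;
}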
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index 911b21602ff2..05005c660d4d 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c | |||
| @@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
| 478 | struct blkcipher_desc desc = { .tfm = state->arc4 }; | 478 | struct blkcipher_desc desc = { .tfm = state->arc4 }; |
| 479 | unsigned ccount; | 479 | unsigned ccount; |
| 480 | int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; | 480 | int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; |
| 481 | int sanity = 0; | ||
| 482 | struct scatterlist sg_in[1], sg_out[1]; | 481 | struct scatterlist sg_in[1], sg_out[1]; |
| 483 | 482 | ||
| 484 | if (isize <= PPP_HDRLEN + MPPE_OVHD) { | 483 | if (isize <= PPP_HDRLEN + MPPE_OVHD) { |
| @@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
| 514 | "mppe_decompress[%d]: ENCRYPTED bit not set!\n", | 513 | "mppe_decompress[%d]: ENCRYPTED bit not set!\n", |
| 515 | state->unit); | 514 | state->unit); |
| 516 | state->sanity_errors += 100; | 515 | state->sanity_errors += 100; |
| 517 | sanity = 1; | 516 | goto sanity_error; |
| 518 | } | 517 | } |
| 519 | if (!state->stateful && !flushed) { | 518 | if (!state->stateful && !flushed) { |
| 520 | printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " | 519 | printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " |
| 521 | "stateless mode!\n", state->unit); | 520 | "stateless mode!\n", state->unit); |
| 522 | state->sanity_errors += 100; | 521 | state->sanity_errors += 100; |
| 523 | sanity = 1; | 522 | goto sanity_error; |
| 524 | } | 523 | } |
| 525 | if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { | 524 | if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { |
| 526 | printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " | 525 | printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " |
| 527 | "flag packet!\n", state->unit); | 526 | "flag packet!\n", state->unit); |
| 528 | state->sanity_errors += 100; | 527 | state->sanity_errors += 100; |
| 529 | sanity = 1; | 528 | goto sanity_error; |
| 530 | } | ||
| 531 | |||
| 532 | if (sanity) { | ||
| 533 | if (state->sanity_errors < SANITY_MAX) | ||
| 534 | return DECOMP_ERROR; | ||
| 535 | else | ||
| 536 | /* | ||
| 537 | * Take LCP down if the peer is sending too many bogons. | ||
| 538 | * We don't want to do this for a single or just a few | ||
| 539 | * instances since it could just be due to packet corruption. | ||
| 540 | */ | ||
| 541 | return DECOMP_FATALERROR; | ||
| 542 | } | 529 | } |
| 543 | 530 | ||
| 544 | /* | 531 | /* |
| @@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
| 546 | */ | 533 | */ |
| 547 | 534 | ||
| 548 | if (!state->stateful) { | 535 | if (!state->stateful) { |
| 536 | /* Discard late packet */ | ||
| 537 | if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE | ||
| 538 | > MPPE_CCOUNT_SPACE / 2) { | ||
| 539 | state->sanity_errors++; | ||
| 540 | goto sanity_error; | ||
| 541 | } | ||
| 542 | |||
| 549 | /* RFC 3078, sec 8.1. Rekey for every packet. */ | 543 | /* RFC 3078, sec 8.1. Rekey for every packet. */ |
| 550 | while (state->ccount != ccount) { | 544 | while (state->ccount != ccount) { |
| 551 | mppe_rekey(state, 0); | 545 | mppe_rekey(state, 0); |
| @@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
| 649 | state->sanity_errors >>= 1; | 643 | state->sanity_errors >>= 1; |
| 650 | 644 | ||
| 651 | return osize; | 645 | return osize; |
| 646 | |||
| 647 | sanity_error: | ||
| 648 | if (state->sanity_errors < SANITY_MAX) | ||
| 649 | return DECOMP_ERROR; | ||
| 650 | else | ||
| 651 | /* Take LCP down if the peer is sending too many bogons. | ||
| 652 | * We don't want to do this for a single or just a few | ||
| 653 | * instances since it could just be due to packet corruption. | ||
| 654 | */ | ||
| 655 | return DECOMP_FATALERROR; | ||
| 652 | } | 656 | } |
| 653 | 657 | ||
| 654 | /* | 658 | /* |
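The stateless-mode hunk in ppp_mppe.c above drops "late" packets by comparing coherency counts modulo MPPE_CCOUNT_SPACE: anything more than half the count space behind the expected value is discarded rather than rekeyed backwards. A minimal sketch of that comparison, assuming the driver's 4096-entry count space (illustrative code, not the driver itself):

#include <stdio.h>

#define CCOUNT_SPACE 0x1000	/* MPPE coherency counts wrap at 4096 */

static int is_late(unsigned int expected, unsigned int received)
{
	/* Late if the received count sits in the "behind" half of the ring. */
	return (received - expected) % CCOUNT_SPACE > CCOUNT_SPACE / 2;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_late(10, 9),		/* 1: behind -> discard, count a sanity error */
	       is_late(10, 12),		/* 0: ahead -> rekey forward to catch up */
	       is_late(0xfff, 0x001));	/* 0: wrapped around, still ahead */
	return 0;
}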
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index aa1dd926623a..b62a5e3a1c65 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c | |||
| @@ -465,6 +465,10 @@ static void pppoe_unbind_sock_work(struct work_struct *work) | |||
| 465 | struct sock *sk = sk_pppox(po); | 465 | struct sock *sk = sk_pppox(po); |
| 466 | 466 | ||
| 467 | lock_sock(sk); | 467 | lock_sock(sk); |
| 468 | if (po->pppoe_dev) { | ||
| 469 | dev_put(po->pppoe_dev); | ||
| 470 | po->pppoe_dev = NULL; | ||
| 471 | } | ||
| 468 | pppox_unbind_sock(sk); | 472 | pppox_unbind_sock(sk); |
| 469 | release_sock(sk); | 473 | release_sock(sk); |
| 470 | sock_put(sk); | 474 | sock_put(sk); |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index c3e4da9e79ca..8067b8fbb0ee 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
| @@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) | |||
| 1182 | * payload data instead. | 1182 | * payload data instead. |
| 1183 | */ | 1183 | */ |
| 1184 | usbnet_set_skb_tx_stats(skb_out, n, | 1184 | usbnet_set_skb_tx_stats(skb_out, n, |
| 1185 | ctx->tx_curr_frame_payload - skb_out->len); | 1185 | (long)ctx->tx_curr_frame_payload - skb_out->len); |
| 1186 | 1186 | ||
| 1187 | return skb_out; | 1187 | return skb_out; |
| 1188 | 1188 | ||
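The cdc_ncm hunk above casts tx_curr_frame_payload to long before subtracting skb_out->len, so the byte-count delta can go negative instead of wrapping as an unsigned value. A tiny illustration with made-up numbers (assuming a 64-bit build, where long is wider than unsigned int):

#include <stdio.h>

int main(void)
{
	unsigned int payload = 1000;	/* real payload bytes carried in the NTB */
	unsigned int len = 1514;	/* skb_out->len after headers and padding */

	/* Unsigned subtraction wraps around instead of going negative. */
	printf("%u\n", payload - len);		/* 4294966782 */

	/* Casting one operand to long keeps the sign of the delta. */
	printf("%ld\n", (long)payload - len);	/* -514 */
	return 0;
}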
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ac4d03b328b1..aafa1a1898e4 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -4116,6 +4116,7 @@ static struct usb_device_id rtl8152_table[] = { | |||
| 4116 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, | 4116 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, |
| 4117 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, | 4117 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, |
| 4118 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, | 4118 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, |
| 4119 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, | ||
| 4119 | {} | 4120 | {} |
| 4120 | }; | 4121 | }; |
| 4121 | 4122 | ||
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 733f4feb2ef3..3c86b107275a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
| @@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
| 1285 | struct net_device *net) | 1285 | struct net_device *net) |
| 1286 | { | 1286 | { |
| 1287 | struct usbnet *dev = netdev_priv(net); | 1287 | struct usbnet *dev = netdev_priv(net); |
| 1288 | int length; | 1288 | unsigned int length; |
| 1289 | struct urb *urb = NULL; | 1289 | struct urb *urb = NULL; |
| 1290 | struct skb_data *entry; | 1290 | struct skb_data *entry; |
| 1291 | struct driver_info *info = dev->driver_info; | 1291 | struct driver_info *info = dev->driver_info; |
| @@ -1413,7 +1413,7 @@ not_drop: | |||
| 1413 | } | 1413 | } |
| 1414 | } else | 1414 | } else |
| 1415 | netif_dbg(dev, tx_queued, dev->net, | 1415 | netif_dbg(dev, tx_queued, dev->net, |
| 1416 | "> tx, len %d, type 0x%x\n", length, skb->protocol); | 1416 | "> tx, len %u, type 0x%x\n", length, skb->protocol); |
| 1417 | #ifdef CONFIG_PM | 1417 | #ifdef CONFIG_PM |
| 1418 | deferred: | 1418 | deferred: |
| 1419 | #endif | 1419 | #endif |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 154116aafd0d..21a0fbf1ed94 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -730,12 +730,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, | |||
| 730 | /* Only change unicasts */ | 730 | /* Only change unicasts */ |
| 731 | if (!(is_multicast_ether_addr(f->eth_addr) || | 731 | if (!(is_multicast_ether_addr(f->eth_addr) || |
| 732 | is_zero_ether_addr(f->eth_addr))) { | 732 | is_zero_ether_addr(f->eth_addr))) { |
| 733 | int rc = vxlan_fdb_replace(f, ip, port, vni, | 733 | notify |= vxlan_fdb_replace(f, ip, port, vni, |
| 734 | ifindex); | 734 | ifindex); |
| 735 | |||
| 736 | if (rc < 0) | ||
| 737 | return rc; | ||
| 738 | notify |= rc; | ||
| 739 | } else | 735 | } else |
| 740 | return -EOPNOTSUPP; | 736 | return -EOPNOTSUPP; |
| 741 | } | 737 | } |
| @@ -2965,7 +2961,7 @@ static void __net_exit vxlan_exit_net(struct net *net) | |||
| 2965 | * to the list by the previous loop. | 2961 | * to the list by the previous loop. |
| 2966 | */ | 2962 | */ |
| 2967 | if (!net_eq(dev_net(vxlan->dev), net)) | 2963 | if (!net_eq(dev_net(vxlan->dev), net)) |
| 2968 | unregister_netdevice_queue(dev, &list); | 2964 | unregister_netdevice_queue(vxlan->dev, &list); |
| 2969 | } | 2965 | } |
| 2970 | 2966 | ||
| 2971 | unregister_netdevice_many(&list); | 2967 | unregister_netdevice_many(&list); |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 0acd079ba96b..3ad79bb4f2c2 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
| @@ -1103,28 +1103,14 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, | |||
| 1103 | struct sk_buff *skb; | 1103 | struct sk_buff *skb; |
| 1104 | struct ath_frame_info *fi; | 1104 | struct ath_frame_info *fi; |
| 1105 | struct ieee80211_tx_info *info; | 1105 | struct ieee80211_tx_info *info; |
| 1106 | struct ieee80211_vif *vif; | ||
| 1107 | struct ath_hw *ah = sc->sc_ah; | 1106 | struct ath_hw *ah = sc->sc_ah; |
| 1108 | 1107 | ||
| 1109 | if (sc->tx99_state || !ah->tpc_enabled) | 1108 | if (sc->tx99_state || !ah->tpc_enabled) |
| 1110 | return MAX_RATE_POWER; | 1109 | return MAX_RATE_POWER; |
| 1111 | 1110 | ||
| 1112 | skb = bf->bf_mpdu; | 1111 | skb = bf->bf_mpdu; |
| 1113 | info = IEEE80211_SKB_CB(skb); | ||
| 1114 | vif = info->control.vif; | ||
| 1115 | |||
| 1116 | if (!vif) { | ||
| 1117 | max_power = sc->cur_chan->cur_txpower; | ||
| 1118 | goto out; | ||
| 1119 | } | ||
| 1120 | |||
| 1121 | if (vif->bss_conf.txpower_type != NL80211_TX_POWER_LIMITED) { | ||
| 1122 | max_power = min_t(u8, sc->cur_chan->cur_txpower, | ||
| 1123 | 2 * vif->bss_conf.txpower); | ||
| 1124 | goto out; | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | fi = get_frame_info(skb); | 1112 | fi = get_frame_info(skb); |
| 1113 | info = IEEE80211_SKB_CB(skb); | ||
| 1128 | 1114 | ||
| 1129 | if (!AR_SREV_9300_20_OR_LATER(ah)) { | 1115 | if (!AR_SREV_9300_20_OR_LATER(ah)) { |
| 1130 | int txpower = fi->tx_power; | 1116 | int txpower = fi->tx_power; |
| @@ -1161,25 +1147,26 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf, | |||
| 1161 | txpower -= 2; | 1147 | txpower -= 2; |
| 1162 | 1148 | ||
| 1163 | txpower = max(txpower, 0); | 1149 | txpower = max(txpower, 0); |
| 1164 | max_power = min_t(u8, ah->tx_power[rateidx], | 1150 | max_power = min_t(u8, ah->tx_power[rateidx], txpower); |
| 1165 | 2 * vif->bss_conf.txpower); | 1151 | |
| 1166 | max_power = min_t(u8, max_power, txpower); | 1152 | /* XXX: clamp minimum TX power at 1 for AR9160 since if |
| 1153 | * max_power is set to 0, frames are transmitted at max | ||
| 1154 | * TX power | ||
| 1155 | */ | ||
| 1156 | if (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) | ||
| 1157 | max_power = 1; | ||
| 1167 | } else if (!bf->bf_state.bfs_paprd) { | 1158 | } else if (!bf->bf_state.bfs_paprd) { |
| 1168 | if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC)) | 1159 | if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC)) |
| 1169 | max_power = min_t(u8, ah->tx_power_stbc[rateidx], | 1160 | max_power = min_t(u8, ah->tx_power_stbc[rateidx], |
| 1170 | 2 * vif->bss_conf.txpower); | 1161 | fi->tx_power); |
| 1171 | else | 1162 | else |
| 1172 | max_power = min_t(u8, ah->tx_power[rateidx], | 1163 | max_power = min_t(u8, ah->tx_power[rateidx], |
| 1173 | 2 * vif->bss_conf.txpower); | 1164 | fi->tx_power); |
| 1174 | max_power = min(max_power, fi->tx_power); | ||
| 1175 | } else { | 1165 | } else { |
| 1176 | max_power = ah->paprd_training_power; | 1166 | max_power = ah->paprd_training_power; |
| 1177 | } | 1167 | } |
| 1178 | out: | 1168 | |
| 1179 | /* XXX: clamp minimum TX power at 1 for AR9160 since if max_power | 1169 | return max_power; |
| 1180 | * is set to 0, frames are transmitted at max TX power | ||
| 1181 | */ | ||
| 1182 | return (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) ? 1 : max_power; | ||
| 1183 | } | 1170 | } |
| 1184 | 1171 | ||
| 1185 | static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, | 1172 | static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, |
| @@ -2129,6 +2116,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, | |||
| 2129 | struct ath_node *an = NULL; | 2116 | struct ath_node *an = NULL; |
| 2130 | enum ath9k_key_type keytype; | 2117 | enum ath9k_key_type keytype; |
| 2131 | bool short_preamble = false; | 2118 | bool short_preamble = false; |
| 2119 | u8 txpower; | ||
| 2132 | 2120 | ||
| 2133 | /* | 2121 | /* |
| 2134 | * We check if Short Preamble is needed for the CTS rate by | 2122 | * We check if Short Preamble is needed for the CTS rate by |
| @@ -2145,6 +2133,16 @@ static void setup_frame_info(struct ieee80211_hw *hw, | |||
| 2145 | if (sta) | 2133 | if (sta) |
| 2146 | an = (struct ath_node *) sta->drv_priv; | 2134 | an = (struct ath_node *) sta->drv_priv; |
| 2147 | 2135 | ||
| 2136 | if (tx_info->control.vif) { | ||
| 2137 | struct ieee80211_vif *vif = tx_info->control.vif; | ||
| 2138 | |||
| 2139 | txpower = 2 * vif->bss_conf.txpower; | ||
| 2140 | } else { | ||
| 2141 | struct ath_softc *sc = hw->priv; | ||
| 2142 | |||
| 2143 | txpower = sc->cur_chan->cur_txpower; | ||
| 2144 | } | ||
| 2145 | |||
| 2148 | memset(fi, 0, sizeof(*fi)); | 2146 | memset(fi, 0, sizeof(*fi)); |
| 2149 | fi->txq = -1; | 2147 | fi->txq = -1; |
| 2150 | if (hw_key) | 2148 | if (hw_key) |
| @@ -2155,7 +2153,7 @@ static void setup_frame_info(struct ieee80211_hw *hw, | |||
| 2155 | fi->keyix = ATH9K_TXKEYIX_INVALID; | 2153 | fi->keyix = ATH9K_TXKEYIX_INVALID; |
| 2156 | fi->keytype = keytype; | 2154 | fi->keytype = keytype; |
| 2157 | fi->framelen = framelen; | 2155 | fi->framelen = framelen; |
| 2158 | fi->tx_power = MAX_RATE_POWER; | 2156 | fi->tx_power = txpower; |
| 2159 | 2157 | ||
| 2160 | if (!rate) | 2158 | if (!rate) |
| 2161 | return; | 2159 | return; |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 4ec9811f49c8..65efb1468988 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | |||
| @@ -511,11 +511,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, | |||
| 511 | msgbuf->rx_pktids, | 511 | msgbuf->rx_pktids, |
| 512 | msgbuf->ioctl_resp_pktid); | 512 | msgbuf->ioctl_resp_pktid); |
| 513 | if (msgbuf->ioctl_resp_ret_len != 0) { | 513 | if (msgbuf->ioctl_resp_ret_len != 0) { |
| 514 | if (!skb) { | 514 | if (!skb) |
| 515 | brcmf_err("Invalid packet id idx recv'd %d\n", | ||
| 516 | msgbuf->ioctl_resp_pktid); | ||
| 517 | return -EBADF; | 515 | return -EBADF; |
| 518 | } | 516 | |
| 519 | memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? | 517 | memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? |
| 520 | len : msgbuf->ioctl_resp_ret_len); | 518 | len : msgbuf->ioctl_resp_ret_len); |
| 521 | } | 519 | } |
| @@ -874,10 +872,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) | |||
| 874 | flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; | 872 | flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; |
| 875 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, | 873 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, |
| 876 | msgbuf->tx_pktids, idx); | 874 | msgbuf->tx_pktids, idx); |
| 877 | if (!skb) { | 875 | if (!skb) |
| 878 | brcmf_err("Invalid packet id idx recv'd %d\n", idx); | ||
| 879 | return; | 876 | return; |
| 880 | } | ||
| 881 | 877 | ||
| 882 | set_bit(flowid, msgbuf->txstatus_done_map); | 878 | set_bit(flowid, msgbuf->txstatus_done_map); |
| 883 | commonring = msgbuf->flowrings[flowid]; | 879 | commonring = msgbuf->flowrings[flowid]; |
| @@ -1156,6 +1152,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) | |||
| 1156 | 1152 | ||
| 1157 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, | 1153 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, |
| 1158 | msgbuf->rx_pktids, idx); | 1154 | msgbuf->rx_pktids, idx); |
| 1155 | if (!skb) | ||
| 1156 | return; | ||
| 1159 | 1157 | ||
| 1160 | if (data_offset) | 1158 | if (data_offset) |
| 1161 | skb_pull(skb, data_offset); | 1159 | skb_pull(skb, data_offset); |
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index ab019b45551b..f89f446e5c8a 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig | |||
| @@ -21,6 +21,7 @@ config IWLWIFI | |||
| 21 | Intel 7260 Wi-Fi Adapter | 21 | Intel 7260 Wi-Fi Adapter |
| 22 | Intel 3160 Wi-Fi Adapter | 22 | Intel 3160 Wi-Fi Adapter |
| 23 | Intel 7265 Wi-Fi Adapter | 23 | Intel 7265 Wi-Fi Adapter |
| 24 | Intel 3165 Wi-Fi Adapter | ||
| 24 | 25 | ||
| 25 | 26 | ||
| 26 | This driver uses the kernel's mac80211 subsystem. | 27 | This driver uses the kernel's mac80211 subsystem. |
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 36e786f0387b..74ad278116be 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c | |||
| @@ -70,15 +70,14 @@ | |||
| 70 | 70 | ||
| 71 | /* Highest firmware API version supported */ | 71 | /* Highest firmware API version supported */ |
| 72 | #define IWL7260_UCODE_API_MAX 13 | 72 | #define IWL7260_UCODE_API_MAX 13 |
| 73 | #define IWL3160_UCODE_API_MAX 13 | ||
| 74 | 73 | ||
| 75 | /* Oldest version we won't warn about */ | 74 | /* Oldest version we won't warn about */ |
| 76 | #define IWL7260_UCODE_API_OK 12 | 75 | #define IWL7260_UCODE_API_OK 12 |
| 77 | #define IWL3160_UCODE_API_OK 12 | 76 | #define IWL3165_UCODE_API_OK 13 |
| 78 | 77 | ||
| 79 | /* Lowest firmware API version supported */ | 78 | /* Lowest firmware API version supported */ |
| 80 | #define IWL7260_UCODE_API_MIN 10 | 79 | #define IWL7260_UCODE_API_MIN 10 |
| 81 | #define IWL3160_UCODE_API_MIN 10 | 80 | #define IWL3165_UCODE_API_MIN 13 |
| 82 | 81 | ||
| 83 | /* NVM versions */ | 82 | /* NVM versions */ |
| 84 | #define IWL7260_NVM_VERSION 0x0a1d | 83 | #define IWL7260_NVM_VERSION 0x0a1d |
| @@ -104,9 +103,6 @@ | |||
| 104 | #define IWL3160_FW_PRE "iwlwifi-3160-" | 103 | #define IWL3160_FW_PRE "iwlwifi-3160-" |
| 105 | #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" | 104 | #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" |
| 106 | 105 | ||
| 107 | #define IWL3165_FW_PRE "iwlwifi-3165-" | ||
| 108 | #define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode" | ||
| 109 | |||
| 110 | #define IWL7265_FW_PRE "iwlwifi-7265-" | 106 | #define IWL7265_FW_PRE "iwlwifi-7265-" |
| 111 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" | 107 | #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" |
| 112 | 108 | ||
| @@ -248,8 +244,13 @@ static const struct iwl_ht_params iwl7265_ht_params = { | |||
| 248 | 244 | ||
| 249 | const struct iwl_cfg iwl3165_2ac_cfg = { | 245 | const struct iwl_cfg iwl3165_2ac_cfg = { |
| 250 | .name = "Intel(R) Dual Band Wireless AC 3165", | 246 | .name = "Intel(R) Dual Band Wireless AC 3165", |
| 251 | .fw_name_pre = IWL3165_FW_PRE, | 247 | .fw_name_pre = IWL7265D_FW_PRE, |
| 252 | IWL_DEVICE_7000, | 248 | IWL_DEVICE_7000, |
| 249 | /* sparse doesn't like the re-assignment but it is safe */ | ||
| 250 | #ifndef __CHECKER__ | ||
| 251 | .ucode_api_ok = IWL3165_UCODE_API_OK, | ||
| 252 | .ucode_api_min = IWL3165_UCODE_API_MIN, | ||
| 253 | #endif | ||
| 253 | .ht_params = &iwl7000_ht_params, | 254 | .ht_params = &iwl7000_ht_params, |
| 254 | .nvm_ver = IWL3165_NVM_VERSION, | 255 | .nvm_ver = IWL3165_NVM_VERSION, |
| 255 | .nvm_calib_ver = IWL3165_TX_POWER_VERSION, | 256 | .nvm_calib_ver = IWL3165_TX_POWER_VERSION, |
| @@ -325,6 +326,5 @@ const struct iwl_cfg iwl7265d_n_cfg = { | |||
| 325 | 326 | ||
| 326 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 327 | MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
| 327 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); | 328 | MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); |
| 328 | MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); | ||
| 329 | MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 329 | MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
| 330 | MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); | 330 | MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 41ff85de7334..21302b6f2bfd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | * GPL LICENSE SUMMARY | 6 | * GPL LICENSE SUMMARY |
| 7 | * | 7 | * |
| 8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2015 Intel Mobile Communications GmbH | ||
| 9 | * | 10 | * |
| 10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
| 11 | * it under the terms of version 2 of the GNU General Public License as | 12 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -31,6 +32,7 @@ | |||
| 31 | * BSD LICENSE | 32 | * BSD LICENSE |
| 32 | * | 33 | * |
| 33 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. | 34 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. |
| 35 | * Copyright(c) 2015 Intel Mobile Communications GmbH | ||
| 34 | * All rights reserved. | 36 | * All rights reserved. |
| 35 | * | 37 | * |
| 36 | * Redistribution and use in source and binary forms, with or without | 38 | * Redistribution and use in source and binary forms, with or without |
| @@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, | |||
| 748 | return; | 750 | return; |
| 749 | } | 751 | } |
| 750 | 752 | ||
| 753 | if (data->sku_cap_mimo_disabled) | ||
| 754 | rx_chains = 1; | ||
| 755 | |||
| 751 | ht_info->ht_supported = true; | 756 | ht_info->ht_supported = true; |
| 752 | ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; | 757 | ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; |
| 753 | 758 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h index 5234a0bf11e4..750c8c9ee70d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | * GPL LICENSE SUMMARY | 6 | * GPL LICENSE SUMMARY |
| 7 | * | 7 | * |
| 8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2015 Intel Mobile Communications GmbH | ||
| 9 | * | 10 | * |
| 10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
| 11 | * it under the terms of version 2 of the GNU General Public License as | 12 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -31,6 +32,7 @@ | |||
| 31 | * BSD LICENSE | 32 | * BSD LICENSE |
| 32 | * | 33 | * |
| 33 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. | 34 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. |
| 35 | * Copyright(c) 2015 Intel Mobile Communications GmbH | ||
| 34 | * All rights reserved. | 36 | * All rights reserved. |
| 35 | * | 37 | * |
| 36 | * Redistribution and use in source and binary forms, with or without | 38 | * Redistribution and use in source and binary forms, with or without |
| @@ -84,6 +86,7 @@ struct iwl_nvm_data { | |||
| 84 | bool sku_cap_11ac_enable; | 86 | bool sku_cap_11ac_enable; |
| 85 | bool sku_cap_amt_enable; | 87 | bool sku_cap_amt_enable; |
| 86 | bool sku_cap_ipan_enable; | 88 | bool sku_cap_ipan_enable; |
| 89 | bool sku_cap_mimo_disabled; | ||
| 87 | 90 | ||
| 88 | u16 radio_cfg_type; | 91 | u16 radio_cfg_type; |
| 89 | u8 radio_cfg_step; | 92 | u8 radio_cfg_step; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index bfdf3faa6c47..62db2e5e45eb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h | |||
| @@ -244,6 +244,7 @@ enum iwl_ucode_tlv_flag { | |||
| 244 | * longer than the passive one, which is essential for fragmented scan. | 244 | * longer than the passive one, which is essential for fragmented scan. |
| 245 | * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. | 245 | * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. |
| 246 | * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR | 246 | * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR |
| 247 | * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power. | ||
| 247 | * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, | 248 | * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, |
| 248 | * regardless of the band or the number of the probes. FW will calculate | 249 | * regardless of the band or the number of the probes. FW will calculate |
| 249 | * the actual dwell time. | 250 | * the actual dwell time. |
| @@ -260,6 +261,7 @@ enum iwl_ucode_tlv_api { | |||
| 260 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), | 261 | IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), |
| 261 | IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9), | 262 | IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9), |
| 262 | IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), | 263 | IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), |
| 264 | IWL_UCODE_TLV_API_TX_POWER_DEV = BIT(11), | ||
| 263 | IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), | 265 | IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), |
| 264 | IWL_UCODE_TLV_API_SCD_CFG = BIT(15), | 266 | IWL_UCODE_TLV_API_SCD_CFG = BIT(15), |
| 265 | IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), | 267 | IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), |
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 83903a5025c2..8e604a3931ca 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | * GPL LICENSE SUMMARY | 6 | * GPL LICENSE SUMMARY |
| 7 | * | 7 | * |
| 8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 10 | * | 10 | * |
| 11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
| 12 | * it under the terms of version 2 of the GNU General Public License as | 12 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -32,7 +32,7 @@ | |||
| 32 | * BSD LICENSE | 32 | * BSD LICENSE |
| 33 | * | 33 | * |
| 34 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. | 34 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. |
| 35 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 35 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 36 | * All rights reserved. | 36 | * All rights reserved. |
| 37 | * | 37 | * |
| 38 | * Redistribution and use in source and binary forms, with or without | 38 | * Redistribution and use in source and binary forms, with or without |
| @@ -116,10 +116,11 @@ enum family_8000_nvm_offsets { | |||
| 116 | 116 | ||
| 117 | /* SKU Capabilities (actual values from NVM definition) */ | 117 | /* SKU Capabilities (actual values from NVM definition) */ |
| 118 | enum nvm_sku_bits { | 118 | enum nvm_sku_bits { |
| 119 | NVM_SKU_CAP_BAND_24GHZ = BIT(0), | 119 | NVM_SKU_CAP_BAND_24GHZ = BIT(0), |
| 120 | NVM_SKU_CAP_BAND_52GHZ = BIT(1), | 120 | NVM_SKU_CAP_BAND_52GHZ = BIT(1), |
| 121 | NVM_SKU_CAP_11N_ENABLE = BIT(2), | 121 | NVM_SKU_CAP_11N_ENABLE = BIT(2), |
| 122 | NVM_SKU_CAP_11AC_ENABLE = BIT(3), | 122 | NVM_SKU_CAP_11AC_ENABLE = BIT(3), |
| 123 | NVM_SKU_CAP_MIMO_DISABLE = BIT(5), | ||
| 123 | }; | 124 | }; |
| 124 | 125 | ||
| 125 | /* | 126 | /* |
| @@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, | |||
| 368 | if (cfg->ht_params->ldpc) | 369 | if (cfg->ht_params->ldpc) |
| 369 | vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; | 370 | vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; |
| 370 | 371 | ||
| 372 | if (data->sku_cap_mimo_disabled) { | ||
| 373 | num_rx_ants = 1; | ||
| 374 | num_tx_ants = 1; | ||
| 375 | } | ||
| 376 | |||
| 371 | if (num_tx_ants > 1) | 377 | if (num_tx_ants > 1) |
| 372 | vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; | 378 | vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; |
| 373 | else | 379 | else |
| @@ -465,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, | |||
| 465 | if (cfg->device_family != IWL_DEVICE_FAMILY_8000) | 471 | if (cfg->device_family != IWL_DEVICE_FAMILY_8000) |
| 466 | return le16_to_cpup(nvm_sw + RADIO_CFG); | 472 | return le16_to_cpup(nvm_sw + RADIO_CFG); |
| 467 | 473 | ||
| 468 | return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); | 474 | return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000)); |
| 469 | 475 | ||
| 470 | } | 476 | } |
| 471 | 477 | ||
| @@ -527,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev, | |||
| 527 | const u8 *hw_addr; | 533 | const u8 *hw_addr; |
| 528 | 534 | ||
| 529 | if (mac_override) { | 535 | if (mac_override) { |
| 536 | static const u8 reserved_mac[] = { | ||
| 537 | 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 | ||
| 538 | }; | ||
| 539 | |||
| 530 | hw_addr = (const u8 *)(mac_override + | 540 | hw_addr = (const u8 *)(mac_override + |
| 531 | MAC_ADDRESS_OVERRIDE_FAMILY_8000); | 541 | MAC_ADDRESS_OVERRIDE_FAMILY_8000); |
| 532 | 542 | ||
| @@ -538,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev, | |||
| 538 | data->hw_addr[4] = hw_addr[5]; | 548 | data->hw_addr[4] = hw_addr[5]; |
| 539 | data->hw_addr[5] = hw_addr[4]; | 549 | data->hw_addr[5] = hw_addr[4]; |
| 540 | 550 | ||
| 541 | if (is_valid_ether_addr(data->hw_addr)) | 551 | /* |
| 552 | * Force the use of the OTP MAC address in case of reserved MAC | ||
| 553 | * address in the NVM, or if address is given but invalid. | ||
| 554 | */ | ||
| 555 | if (is_valid_ether_addr(data->hw_addr) && | ||
| 556 | memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) | ||
| 542 | return; | 557 | return; |
| 543 | 558 | ||
| 544 | IWL_ERR_DEV(dev, | 559 | IWL_ERR_DEV(dev, |
| @@ -610,6 +625,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, | |||
| 610 | data->sku_cap_11n_enable = false; | 625 | data->sku_cap_11n_enable = false; |
| 611 | data->sku_cap_11ac_enable = data->sku_cap_11n_enable && | 626 | data->sku_cap_11ac_enable = data->sku_cap_11n_enable && |
| 612 | (sku & NVM_SKU_CAP_11AC_ENABLE); | 627 | (sku & NVM_SKU_CAP_11AC_ENABLE); |
| 628 | data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; | ||
| 613 | 629 | ||
| 614 | data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); | 630 | data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); |
| 615 | 631 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 6dfed1259260..56254a837214 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | * GPL LICENSE SUMMARY | 6 | * GPL LICENSE SUMMARY |
| 7 | * | 7 | * |
| 8 | * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 10 | * | 10 | * |
| 11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
| 12 | * it under the terms of version 2 of the GNU General Public License as | 12 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -32,7 +32,7 @@ | |||
| 32 | * BSD LICENSE | 32 | * BSD LICENSE |
| 33 | * | 33 | * |
| 34 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. | 34 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. |
| 35 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 35 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 36 | * All rights reserved. | 36 | * All rights reserved. |
| 37 | * | 37 | * |
| 38 | * Redistribution and use in source and binary forms, with or without | 38 | * Redistribution and use in source and binary forms, with or without |
| @@ -421,8 +421,9 @@ struct iwl_trans_txq_scd_cfg { | |||
| 421 | * | 421 | * |
| 422 | * All the handlers MUST be implemented | 422 | * All the handlers MUST be implemented |
| 423 | * | 423 | * |
| 424 | * @start_hw: starts the HW- from that point on, the HW can send interrupts | 424 | * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken |
| 425 | * May sleep | 425 | * out of a low power state. From that point on, the HW can send |
| 426 | * interrupts. May sleep. | ||
| 426 | * @op_mode_leave: Turn off the HW RF kill indication if on | 427 | * @op_mode_leave: Turn off the HW RF kill indication if on |
| 427 | * May sleep | 428 | * May sleep |
| 428 | * @start_fw: allocates and inits all the resources for the transport | 429 | * @start_fw: allocates and inits all the resources for the transport |
| @@ -432,10 +433,11 @@ struct iwl_trans_txq_scd_cfg { | |||
| 432 | * the SCD base address in SRAM, then provide it here, or 0 otherwise. | 433 | * the SCD base address in SRAM, then provide it here, or 0 otherwise. |
| 433 | * May sleep | 434 | * May sleep |
| 434 | * @stop_device: stops the whole device (embedded CPU put to reset) and stops | 435 | * @stop_device: stops the whole device (embedded CPU put to reset) and stops |
| 435 | * the HW. From that point on, the HW will be in low power but will still | 436 | * the HW. If low_power is true, the NIC will be put in low power state. |
| 436 | * issue interrupt if the HW RF kill is triggered. This callback must do | 437 | * From that point on, the HW will be stopped but will still issue an |
| 437 | * the right thing and not crash even if start_hw() was called but not | 438 | * interrupt if the HW RF kill switch is triggered. |
| 438 | * start_fw(). May sleep | 439 | * This callback must do the right thing and not crash even if %start_hw() |
| 440 | * was called but not &start_fw(). May sleep. | ||
| 439 | * @d3_suspend: put the device into the correct mode for WoWLAN during | 441 | * @d3_suspend: put the device into the correct mode for WoWLAN during |
| 440 | * suspend. This is optional, if not implemented WoWLAN will not be | 442 | * suspend. This is optional, if not implemented WoWLAN will not be |
| 441 | * supported. This callback may sleep. | 443 | * supported. This callback may sleep. |
| @@ -491,14 +493,14 @@ struct iwl_trans_txq_scd_cfg { | |||
| 491 | */ | 493 | */ |
| 492 | struct iwl_trans_ops { | 494 | struct iwl_trans_ops { |
| 493 | 495 | ||
| 494 | int (*start_hw)(struct iwl_trans *iwl_trans); | 496 | int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power); |
| 495 | void (*op_mode_leave)(struct iwl_trans *iwl_trans); | 497 | void (*op_mode_leave)(struct iwl_trans *iwl_trans); |
| 496 | int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, | 498 | int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, |
| 497 | bool run_in_rfkill); | 499 | bool run_in_rfkill); |
| 498 | int (*update_sf)(struct iwl_trans *trans, | 500 | int (*update_sf)(struct iwl_trans *trans, |
| 499 | struct iwl_sf_region *st_fwrd_space); | 501 | struct iwl_sf_region *st_fwrd_space); |
| 500 | void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); | 502 | void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); |
| 501 | void (*stop_device)(struct iwl_trans *trans); | 503 | void (*stop_device)(struct iwl_trans *trans, bool low_power); |
| 502 | 504 | ||
| 503 | void (*d3_suspend)(struct iwl_trans *trans, bool test); | 505 | void (*d3_suspend)(struct iwl_trans *trans, bool test); |
| 504 | int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, | 506 | int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, |
| @@ -652,11 +654,16 @@ static inline void iwl_trans_configure(struct iwl_trans *trans, | |||
| 652 | trans->ops->configure(trans, trans_cfg); | 654 | trans->ops->configure(trans, trans_cfg); |
| 653 | } | 655 | } |
| 654 | 656 | ||
| 655 | static inline int iwl_trans_start_hw(struct iwl_trans *trans) | 657 | static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power) |
| 656 | { | 658 | { |
| 657 | might_sleep(); | 659 | might_sleep(); |
| 658 | 660 | ||
| 659 | return trans->ops->start_hw(trans); | 661 | return trans->ops->start_hw(trans, low_power); |
| 662 | } | ||
| 663 | |||
| 664 | static inline int iwl_trans_start_hw(struct iwl_trans *trans) | ||
| 665 | { | ||
| 666 | return trans->ops->start_hw(trans, true); | ||
| 660 | } | 667 | } |
| 661 | 668 | ||
| 662 | static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) | 669 | static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) |
| @@ -703,15 +710,21 @@ static inline int iwl_trans_update_sf(struct iwl_trans *trans, | |||
| 703 | return 0; | 710 | return 0; |
| 704 | } | 711 | } |
| 705 | 712 | ||
| 706 | static inline void iwl_trans_stop_device(struct iwl_trans *trans) | 713 | static inline void _iwl_trans_stop_device(struct iwl_trans *trans, |
| 714 | bool low_power) | ||
| 707 | { | 715 | { |
| 708 | might_sleep(); | 716 | might_sleep(); |
| 709 | 717 | ||
| 710 | trans->ops->stop_device(trans); | 718 | trans->ops->stop_device(trans, low_power); |
| 711 | 719 | ||
| 712 | trans->state = IWL_TRANS_NO_FW; | 720 | trans->state = IWL_TRANS_NO_FW; |
| 713 | } | 721 | } |
| 714 | 722 | ||
| 723 | static inline void iwl_trans_stop_device(struct iwl_trans *trans) | ||
| 724 | { | ||
| 725 | _iwl_trans_stop_device(trans, true); | ||
| 726 | } | ||
| 727 | |||
| 715 | static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) | 728 | static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) |
| 716 | { | 729 | { |
| 717 | might_sleep(); | 730 | might_sleep(); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index d954591e0be5..6ac6de2af977 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | |||
| @@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, | |||
| 776 | struct iwl_host_cmd cmd = { | 776 | struct iwl_host_cmd cmd = { |
| 777 | .id = BT_CONFIG, | 777 | .id = BT_CONFIG, |
| 778 | .len = { sizeof(*bt_cmd), }, | 778 | .len = { sizeof(*bt_cmd), }, |
| 779 | .dataflags = { IWL_HCMD_DFL_NOCOPY, }, | 779 | .dataflags = { IWL_HCMD_DFL_DUP, }, |
| 780 | .flags = CMD_ASYNC, | 780 | .flags = CMD_ASYNC, |
| 781 | }; | 781 | }; |
| 782 | struct iwl_mvm_sta *mvmsta; | 782 | struct iwl_mvm_sta *mvmsta; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index a6c48c7b1e16..4310cf102d78 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c | |||
| @@ -1726,7 +1726,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, | |||
| 1726 | results->matched_profiles = le32_to_cpu(query->matched_profiles); | 1726 | results->matched_profiles = le32_to_cpu(query->matched_profiles); |
| 1727 | memcpy(results->matches, query->matches, sizeof(results->matches)); | 1727 | memcpy(results->matches, query->matches, sizeof(results->matches)); |
| 1728 | 1728 | ||
| 1729 | #ifdef CPTCFG_IWLWIFI_DEBUGFS | 1729 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
| 1730 | mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); | 1730 | mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); |
| 1731 | #endif | 1731 | #endif |
| 1732 | 1732 | ||
| @@ -1750,8 +1750,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, | |||
| 1750 | int i, j, n_matches, ret; | 1750 | int i, j, n_matches, ret; |
| 1751 | 1751 | ||
| 1752 | fw_status = iwl_mvm_get_wakeup_status(mvm, vif); | 1752 | fw_status = iwl_mvm_get_wakeup_status(mvm, vif); |
| 1753 | if (!IS_ERR_OR_NULL(fw_status)) | 1753 | if (!IS_ERR_OR_NULL(fw_status)) { |
| 1754 | reasons = le32_to_cpu(fw_status->wakeup_reasons); | 1754 | reasons = le32_to_cpu(fw_status->wakeup_reasons); |
| 1755 | kfree(fw_status); | ||
| 1756 | } | ||
| 1755 | 1757 | ||
| 1756 | if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) | 1758 | if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) |
| 1757 | wakeup.rfkill_release = true; | 1759 | wakeup.rfkill_release = true; |
| @@ -1868,15 +1870,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) | |||
| 1868 | /* get the BSS vif pointer again */ | 1870 | /* get the BSS vif pointer again */ |
| 1869 | vif = iwl_mvm_get_bss_vif(mvm); | 1871 | vif = iwl_mvm_get_bss_vif(mvm); |
| 1870 | if (IS_ERR_OR_NULL(vif)) | 1872 | if (IS_ERR_OR_NULL(vif)) |
| 1871 | goto out_unlock; | 1873 | goto err; |
| 1872 | 1874 | ||
| 1873 | ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); | 1875 | ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); |
| 1874 | if (ret) | 1876 | if (ret) |
| 1875 | goto out_unlock; | 1877 | goto err; |
| 1876 | 1878 | ||
| 1877 | if (d3_status != IWL_D3_STATUS_ALIVE) { | 1879 | if (d3_status != IWL_D3_STATUS_ALIVE) { |
| 1878 | IWL_INFO(mvm, "Device was reset during suspend\n"); | 1880 | IWL_INFO(mvm, "Device was reset during suspend\n"); |
| 1879 | goto out_unlock; | 1881 | goto err; |
| 1880 | } | 1882 | } |
| 1881 | 1883 | ||
| 1882 | /* query SRAM first in case we want event logging */ | 1884 | /* query SRAM first in case we want event logging */ |
| @@ -1902,7 +1904,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) | |||
| 1902 | goto out_iterate; | 1904 | goto out_iterate; |
| 1903 | } | 1905 | } |
| 1904 | 1906 | ||
| 1905 | out_unlock: | 1907 | err: |
| 1908 | iwl_mvm_free_nd(mvm); | ||
| 1906 | mutex_unlock(&mvm->mutex); | 1909 | mutex_unlock(&mvm->mutex); |
| 1907 | 1910 | ||
| 1908 | out_iterate: | 1911 | out_iterate: |
| @@ -1915,6 +1918,14 @@ out: | |||
| 1915 | /* return 1 to reconfigure the device */ | 1918 | /* return 1 to reconfigure the device */ |
| 1916 | set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); | 1919 | set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); |
| 1917 | set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); | 1920 | set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); |
| 1921 | |||
| 1922 | /* We always return 1, which causes mac80211 to do a reconfig | ||
| 1923 | * with IEEE80211_RECONFIG_TYPE_RESTART. This type of | ||
| 1924 | * reconfig calls iwl_mvm_restart_complete(), where we unref | ||
| 1925 | * the IWL_MVM_REF_UCODE_DOWN, so we need to take the | ||
| 1926 | * reference here. | ||
| 1927 | */ | ||
| 1928 | iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); | ||
| 1918 | return 1; | 1929 | return 1; |
| 1919 | } | 1930 | } |
| 1920 | 1931 | ||
| @@ -2021,7 +2032,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) | |||
| 2021 | __iwl_mvm_resume(mvm, true); | 2032 | __iwl_mvm_resume(mvm, true); |
| 2022 | rtnl_unlock(); | 2033 | rtnl_unlock(); |
| 2023 | iwl_abort_notification_waits(&mvm->notif_wait); | 2034 | iwl_abort_notification_waits(&mvm->notif_wait); |
| 2024 | iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); | ||
| 2025 | ieee80211_restart_hw(mvm->hw); | 2035 | ieee80211_restart_hw(mvm->hw); |
| 2026 | 2036 | ||
| 2027 | /* wait for restart and disconnect all interfaces */ | 2037 | /* wait for restart and disconnect all interfaces */ |
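Editor's note: taken together, the two d3.c hunks above move the IWL_MVM_REF_UCODE_DOWN reference so it is acquired exactly once per resume and released once by the restart path. A sketch of the pairing; the _tail helpers are hypothetical names used only to isolate the two sides:

        static int __iwl_mvm_resume_tail(struct iwl_mvm *mvm)
        {
                /* taken on every path that returns 1 and triggers a restart */
                iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
                return 1;
        }

        static void iwl_mvm_restart_complete_tail(struct iwl_mvm *mvm)
        {
                /* released once mac80211 finishes the RESTART-type reconfig */
                iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
        }

This is also why the explicit iwl_mvm_ref() in iwl_mvm_d3_test_release() can be dropped: the reference it used to take is now taken inside __iwl_mvm_resume() itself.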
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index 4fc0938b3fb6..b1baa33cc19b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | |||
| @@ -298,6 +298,40 @@ struct iwl_uapsd_misbehaving_ap_notif { | |||
| 298 | } __packed; | 298 | } __packed; |
| 299 | 299 | ||
| 300 | /** | 300 | /** |
| 301 | * struct iwl_reduce_tx_power_cmd - TX power reduction command | ||
| 302 | * REDUCE_TX_POWER_CMD = 0x9f | ||
| 303 | * @flags: (reserved for future implementation) | ||
| 304 | * @mac_context_id: id of the mac ctx for which we are reducing TX power. | ||
| 305 | * @pwr_restriction: TX power restriction in dBms. | ||
| 306 | */ | ||
| 307 | struct iwl_reduce_tx_power_cmd { | ||
| 308 | u8 flags; | ||
| 309 | u8 mac_context_id; | ||
| 310 | __le16 pwr_restriction; | ||
| 311 | } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ | ||
| 312 | |||
| 313 | /** | ||
| 314 | * struct iwl_dev_tx_power_cmd - TX power reduction command | ||
| 315 | * REDUCE_TX_POWER_CMD = 0x9f | ||
| 316 | * @set_mode: 0 - MAC tx power, 1 - device tx power | ||
| 317 | * @mac_context_id: id of the mac ctx for which we are reducing TX power. | ||
| 318 | * @pwr_restriction: TX power restriction in 1/8 dBms. | ||
| 319 | * @dev_24: device TX power restriction in 1/8 dBms | ||
| 320 | * @dev_52_low: device TX power restriction upper band - low | ||
| 321 | * @dev_52_high: device TX power restriction upper band - high | ||
| 322 | */ | ||
| 323 | struct iwl_dev_tx_power_cmd { | ||
| 324 | __le32 set_mode; | ||
| 325 | __le32 mac_context_id; | ||
| 326 | __le16 pwr_restriction; | ||
| 327 | __le16 dev_24; | ||
| 328 | __le16 dev_52_low; | ||
| 329 | __le16 dev_52_high; | ||
| 330 | } __packed; /* TX_REDUCED_POWER_API_S_VER_2 */ | ||
| 331 | |||
| 332 | #define IWL_DEV_MAX_TX_POWER 0x7FFF | ||
| 333 | |||
| 334 | /** | ||
| 301 | * struct iwl_beacon_filter_cmd | 335 | * struct iwl_beacon_filter_cmd |
| 302 | * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) | 336 | * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) |
| 303 | * @id_and_color: MAC context identifier | 337 | * @id_and_color: MAC context identifier |
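Editor's note: the VER_2 command added above expresses power in 1/8 dBm steps, so callers scale the dBm value they get from mac80211. A sketch of filling the new structure, mirroring the mac80211.c hunk further down in this series; mvmvif stands for the interface's iwl_mvm_vif and is illustrative here:

        struct iwl_dev_tx_power_cmd cmd = {
                .set_mode = 0,                                 /* 0 = per-MAC limit */
                .mac_context_id = cpu_to_le32(mvmvif->id),
                .pwr_restriction = cpu_to_le16(8 * tx_power),  /* dBm -> 1/8 dBm    */
        };

        if (tx_power == IWL_DEFAULT_MAX_TX_POWER)              /* "no user limit"   */
                cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);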
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 4f81dcf57a73..d6cced47d561 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | |||
| @@ -122,46 +122,6 @@ enum iwl_scan_complete_status { | |||
| 122 | SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C, | 122 | SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C, |
| 123 | }; | 123 | }; |
| 124 | 124 | ||
| 125 | /** | ||
| 126 | * struct iwl_scan_results_notif - scan results for one channel | ||
| 127 | * ( SCAN_RESULTS_NOTIFICATION = 0x83 ) | ||
| 128 | * @channel: which channel the results are from | ||
| 129 | * @band: 0 for 5.2 GHz, 1 for 2.4 GHz | ||
| 130 | * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request | ||
| 131 | * @num_probe_not_sent: # of request that weren't sent due to not enough time | ||
| 132 | * @duration: duration spent in channel, in usecs | ||
| 133 | * @statistics: statistics gathered for this channel | ||
| 134 | */ | ||
| 135 | struct iwl_scan_results_notif { | ||
| 136 | u8 channel; | ||
| 137 | u8 band; | ||
| 138 | u8 probe_status; | ||
| 139 | u8 num_probe_not_sent; | ||
| 140 | __le32 duration; | ||
| 141 | __le32 statistics[SCAN_RESULTS_STATISTICS]; | ||
| 142 | } __packed; /* SCAN_RESULT_NTF_API_S_VER_2 */ | ||
| 143 | |||
| 144 | /** | ||
| 145 | * struct iwl_scan_complete_notif - notifies end of scanning (all channels) | ||
| 146 | * ( SCAN_COMPLETE_NOTIFICATION = 0x84 ) | ||
| 147 | * @scanned_channels: number of channels scanned (and number of valid results) | ||
| 148 | * @status: one of SCAN_COMP_STATUS_* | ||
| 149 | * @bt_status: BT on/off status | ||
| 150 | * @last_channel: last channel that was scanned | ||
| 151 | * @tsf_low: TSF timer (lower half) in usecs | ||
| 152 | * @tsf_high: TSF timer (higher half) in usecs | ||
| 153 | * @results: array of scan results, only "scanned_channels" of them are valid | ||
| 154 | */ | ||
| 155 | struct iwl_scan_complete_notif { | ||
| 156 | u8 scanned_channels; | ||
| 157 | u8 status; | ||
| 158 | u8 bt_status; | ||
| 159 | u8 last_channel; | ||
| 160 | __le32 tsf_low; | ||
| 161 | __le32 tsf_high; | ||
| 162 | struct iwl_scan_results_notif results[]; | ||
| 163 | } __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */ | ||
| 164 | |||
| 165 | /* scan offload */ | 125 | /* scan offload */ |
| 166 | #define IWL_SCAN_MAX_BLACKLIST_LEN 64 | 126 | #define IWL_SCAN_MAX_BLACKLIST_LEN 64 |
| 167 | #define IWL_SCAN_SHORT_BLACKLIST_LEN 16 | 127 | #define IWL_SCAN_SHORT_BLACKLIST_LEN 16 |
| @@ -554,7 +514,7 @@ struct iwl_scan_req_unified_lmac { | |||
| 554 | } __packed; | 514 | } __packed; |
| 555 | 515 | ||
| 556 | /** | 516 | /** |
| 557 | * struct iwl_lmac_scan_results_notif - scan results for one channel - | 517 | * struct iwl_scan_results_notif - scan results for one channel - |
| 558 | * SCAN_RESULT_NTF_API_S_VER_3 | 518 | * SCAN_RESULT_NTF_API_S_VER_3 |
| 559 | * @channel: which channel the results are from | 519 | * @channel: which channel the results are from |
| 560 | * @band: 0 for 5.2 GHz, 1 for 2.4 GHz | 520 | * @band: 0 for 5.2 GHz, 1 for 2.4 GHz |
| @@ -562,7 +522,7 @@ struct iwl_scan_req_unified_lmac { | |||
| 562 | * @num_probe_not_sent: # of requests that weren't sent due to not enough time | 522 | * @num_probe_not_sent: # of requests that weren't sent due to not enough time |
| 563 | * @duration: duration spent in channel, in usecs | 523 | * @duration: duration spent in channel, in usecs |
| 564 | */ | 524 | */ |
| 565 | struct iwl_lmac_scan_results_notif { | 525 | struct iwl_scan_results_notif { |
| 566 | u8 channel; | 526 | u8 channel; |
| 567 | u8 band; | 527 | u8 band; |
| 568 | u8 probe_status; | 528 | u8 probe_status; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index aab68cbae754..01b1da6ad359 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h | |||
| @@ -281,19 +281,6 @@ struct iwl_tx_ant_cfg_cmd { | |||
| 281 | __le32 valid; | 281 | __le32 valid; |
| 282 | } __packed; | 282 | } __packed; |
| 283 | 283 | ||
| 284 | /** | ||
| 285 | * struct iwl_reduce_tx_power_cmd - TX power reduction command | ||
| 286 | * REDUCE_TX_POWER_CMD = 0x9f | ||
| 287 | * @flags: (reserved for future implementation) | ||
| 288 | * @mac_context_id: id of the mac ctx for which we are reducing TX power. | ||
| 289 | * @pwr_restriction: TX power restriction in dBms. | ||
| 290 | */ | ||
| 291 | struct iwl_reduce_tx_power_cmd { | ||
| 292 | u8 flags; | ||
| 293 | u8 mac_context_id; | ||
| 294 | __le16 pwr_restriction; | ||
| 295 | } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ | ||
| 296 | |||
| 297 | /* | 284 | /* |
| 298 | * Calibration control struct. | 285 | * Calibration control struct. |
| 299 | * Sent as part of the phy configuration command. | 286 | * Sent as part of the phy configuration command. |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index bc5eac4960e1..df869633f4dd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | * GPL LICENSE SUMMARY | 6 | * GPL LICENSE SUMMARY |
| 7 | * | 7 | * |
| 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 10 | * | 10 | * |
| 11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
| 12 | * it under the terms of version 2 of the GNU General Public License as | 12 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -32,7 +32,7 @@ | |||
| 32 | * BSD LICENSE | 32 | * BSD LICENSE |
| 33 | * | 33 | * |
| 34 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 34 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
| 35 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 35 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 36 | * All rights reserved. | 36 | * All rights reserved. |
| 37 | * | 37 | * |
| 38 | * Redistribution and use in source and binary forms, with or without | 38 | * Redistribution and use in source and binary forms, with or without |
| @@ -322,7 +322,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) | |||
| 322 | 322 | ||
| 323 | lockdep_assert_held(&mvm->mutex); | 323 | lockdep_assert_held(&mvm->mutex); |
| 324 | 324 | ||
| 325 | if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating)) | 325 | if (WARN_ON_ONCE(mvm->calibrating)) |
| 326 | return 0; | 326 | return 0; |
| 327 | 327 | ||
| 328 | iwl_init_notification_wait(&mvm->notif_wait, | 328 | iwl_init_notification_wait(&mvm->notif_wait, |
| @@ -396,8 +396,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) | |||
| 396 | */ | 396 | */ |
| 397 | ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, | 397 | ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, |
| 398 | MVM_UCODE_CALIB_TIMEOUT); | 398 | MVM_UCODE_CALIB_TIMEOUT); |
| 399 | if (!ret) | ||
| 400 | mvm->init_ucode_complete = true; | ||
| 401 | 399 | ||
| 402 | if (ret && iwl_mvm_is_radio_killed(mvm)) { | 400 | if (ret && iwl_mvm_is_radio_killed(mvm)) { |
| 403 | IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); | 401 | IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); |
| @@ -494,15 +492,6 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, | |||
| 494 | 492 | ||
| 495 | mvm->fw_dump_desc = desc; | 493 | mvm->fw_dump_desc = desc; |
| 496 | 494 | ||
| 497 | /* stop recording */ | ||
| 498 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { | ||
| 499 | iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); | ||
| 500 | } else { | ||
| 501 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); | ||
| 502 | /* wait before we collect the data till the DBGC stop */ | ||
| 503 | udelay(100); | ||
| 504 | } | ||
| 505 | |||
| 506 | queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); | 495 | queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); |
| 507 | 496 | ||
| 508 | return 0; | 497 | return 0; |
| @@ -658,25 +647,24 @@ int iwl_mvm_up(struct iwl_mvm *mvm) | |||
| 658 | * module loading, load init ucode now | 647 | * module loading, load init ucode now |
| 659 | * (for example, if we were in RFKILL) | 648 | * (for example, if we were in RFKILL) |
| 660 | */ | 649 | */ |
| 661 | if (!mvm->init_ucode_complete) { | 650 | ret = iwl_run_init_mvm_ucode(mvm, false); |
| 662 | ret = iwl_run_init_mvm_ucode(mvm, false); | 651 | if (ret && !iwlmvm_mod_params.init_dbg) { |
| 663 | if (ret && !iwlmvm_mod_params.init_dbg) { | 652 | IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); |
| 664 | IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); | 653 | /* this can't happen */ |
| 665 | /* this can't happen */ | 654 | if (WARN_ON(ret > 0)) |
| 666 | if (WARN_ON(ret > 0)) | 655 | ret = -ERFKILL; |
| 667 | ret = -ERFKILL; | 656 | goto error; |
| 668 | goto error; | 657 | } |
| 669 | } | 658 | if (!iwlmvm_mod_params.init_dbg) { |
| 670 | if (!iwlmvm_mod_params.init_dbg) { | 659 | /* |
| 671 | /* | 660 | * Stop and start the transport without entering low power |
| 672 | * should stop and start HW since that INIT | 661 | * mode. This will save the state of other components on the |
| 673 | * image just loaded | 662 | * device that are triggered by the INIT firmware (MFUART). |
| 674 | */ | 663 | */ |
| 675 | iwl_trans_stop_device(mvm->trans); | 664 | _iwl_trans_stop_device(mvm->trans, false); |
| 676 | ret = iwl_trans_start_hw(mvm->trans); | 665 | _iwl_trans_start_hw(mvm->trans, false); |
| 677 | if (ret) | 666 | if (ret) |
| 678 | return ret; | 667 | return ret; |
| 679 | } | ||
| 680 | } | 668 | } |
| 681 | 669 | ||
| 682 | if (iwlmvm_mod_params.init_dbg) | 670 | if (iwlmvm_mod_params.init_dbg) |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 84555170b6f7..dda9f7b5f342 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
| @@ -1322,7 +1322,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) | |||
| 1322 | 1322 | ||
| 1323 | clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); | 1323 | clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); |
| 1324 | iwl_mvm_d0i3_enable_tx(mvm, NULL); | 1324 | iwl_mvm_d0i3_enable_tx(mvm, NULL); |
| 1325 | ret = iwl_mvm_update_quotas(mvm, false, NULL); | 1325 | ret = iwl_mvm_update_quotas(mvm, true, NULL); |
| 1326 | if (ret) | 1326 | if (ret) |
| 1327 | IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", | 1327 | IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", |
| 1328 | ret); | 1328 | ret); |
| @@ -1471,8 +1471,8 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) | |||
| 1471 | return NULL; | 1471 | return NULL; |
| 1472 | } | 1472 | } |
| 1473 | 1473 | ||
| 1474 | static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | 1474 | static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm, |
| 1475 | s8 tx_power) | 1475 | struct ieee80211_vif *vif, s8 tx_power) |
| 1476 | { | 1476 | { |
| 1477 | /* FW is in charge of regulatory enforcement */ | 1477 | /* FW is in charge of regulatory enforcement */ |
| 1478 | struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = { | 1478 | struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = { |
| @@ -1485,6 +1485,26 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
| 1485 | &reduce_txpwr_cmd); | 1485 | &reduce_txpwr_cmd); |
| 1486 | } | 1486 | } |
| 1487 | 1487 | ||
| 1488 | static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | ||
| 1489 | s16 tx_power) | ||
| 1490 | { | ||
| 1491 | struct iwl_dev_tx_power_cmd cmd = { | ||
| 1492 | .set_mode = 0, | ||
| 1493 | .mac_context_id = | ||
| 1494 | cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), | ||
| 1495 | .pwr_restriction = cpu_to_le16(8 * tx_power), | ||
| 1496 | }; | ||
| 1497 | |||
| 1498 | if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV)) | ||
| 1499 | return iwl_mvm_set_tx_power_old(mvm, vif, tx_power); | ||
| 1500 | |||
| 1501 | if (tx_power == IWL_DEFAULT_MAX_TX_POWER) | ||
| 1502 | cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); | ||
| 1503 | |||
| 1504 | return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, | ||
| 1505 | sizeof(cmd), &cmd); | ||
| 1506 | } | ||
| 1507 | |||
| 1488 | static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, | 1508 | static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, |
| 1489 | struct ieee80211_vif *vif) | 1509 | struct ieee80211_vif *vif) |
| 1490 | { | 1510 | { |
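Editor's note: an illustrative call site, assumed rather than taken from this hunk: mac80211's BSS_CHANGED_TXPOWER handling can pass the dBm value straight through, since the capability check and the 1/8 dBm scaling both live inside iwl_mvm_set_tx_power():

        if (changes & BSS_CHANGED_TXPOWER)
                iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);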
| @@ -3975,9 +3995,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, | |||
| 3975 | if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) | 3995 | if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) |
| 3976 | return; | 3996 | return; |
| 3977 | 3997 | ||
| 3978 | if (event->u.mlme.status == MLME_SUCCESS) | ||
| 3979 | return; | ||
| 3980 | |||
| 3981 | trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); | 3998 | trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); |
| 3982 | trig_mlme = (void *)trig->data; | 3999 | trig_mlme = (void *)trig->data; |
| 3983 | if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) | 4000 | if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index d5522a161242..cf70f681d1ac 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h | |||
| @@ -603,7 +603,6 @@ struct iwl_mvm { | |||
| 603 | 603 | ||
| 604 | enum iwl_ucode_type cur_ucode; | 604 | enum iwl_ucode_type cur_ucode; |
| 605 | bool ucode_loaded; | 605 | bool ucode_loaded; |
| 606 | bool init_ucode_complete; | ||
| 607 | bool calibrating; | 606 | bool calibrating; |
| 608 | u32 error_event_table; | 607 | u32 error_event_table; |
| 609 | u32 log_event_table; | 608 | u32 log_event_table; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index a08b03d58d4b..2ea01238754e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c | |||
| @@ -865,6 +865,16 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) | |||
| 865 | return; | 865 | return; |
| 866 | 866 | ||
| 867 | mutex_lock(&mvm->mutex); | 867 | mutex_lock(&mvm->mutex); |
| 868 | |||
| 869 | /* stop recording */ | ||
| 870 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { | ||
| 871 | iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); | ||
| 872 | } else { | ||
| 873 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); | ||
| 874 | /* wait before we collect the data till the DBGC stop */ | ||
| 875 | udelay(100); | ||
| 876 | } | ||
| 877 | |||
| 868 | iwl_mvm_fw_error_dump(mvm); | 878 | iwl_mvm_fw_error_dump(mvm); |
| 869 | 879 | ||
| 870 | /* start recording again if the firmware is not crashed */ | 880 | /* start recording again if the firmware is not crashed */ |
| @@ -1253,11 +1263,13 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) | |||
| 1253 | ieee80211_iterate_active_interfaces( | 1263 | ieee80211_iterate_active_interfaces( |
| 1254 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, | 1264 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, |
| 1255 | iwl_mvm_d0i3_disconnect_iter, mvm); | 1265 | iwl_mvm_d0i3_disconnect_iter, mvm); |
| 1256 | |||
| 1257 | iwl_free_resp(&get_status_cmd); | ||
| 1258 | out: | 1266 | out: |
| 1259 | iwl_mvm_d0i3_enable_tx(mvm, qos_seq); | 1267 | iwl_mvm_d0i3_enable_tx(mvm, qos_seq); |
| 1260 | 1268 | ||
| 1269 | /* qos_seq might point inside resp_pkt, so free it only now */ | ||
| 1270 | if (get_status_cmd.resp_pkt) | ||
| 1271 | iwl_free_resp(&get_status_cmd); | ||
| 1272 | |||
| 1261 | /* the FW might have updated the regdomain */ | 1273 | /* the FW might have updated the regdomain */ |
| 1262 | iwl_mvm_update_changed_regdom(mvm); | 1274 | iwl_mvm_update_changed_regdom(mvm); |
| 1263 | 1275 | ||
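Editor's note: the reordering above fixes a use-after-free: qos_seq points into the response packet of get_status_cmd, so the response may only be released after its last user runs. A sketch of the intended lifetime; the cast and field layout are assumptions, not taken from this hunk:

        __le16 *qos_seq = NULL;

        if (get_status_cmd.resp_pkt)
                qos_seq = (void *)get_status_cmd.resp_pkt->data;  /* inside the pkt */

        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);      /* last user of the response data */

        if (get_status_cmd.resp_pkt)
                iwl_free_resp(&get_status_cmd);    /* free strictly after that use   */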
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index f9928f2c125f..33cd68ae7bf9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c | |||
| @@ -180,6 +180,9 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 180 | if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) | 180 | if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) |
| 181 | return false; | 181 | return false; |
| 182 | 182 | ||
| 183 | if (mvm->nvm_data->sku_cap_mimo_disabled) | ||
| 184 | return false; | ||
| 185 | |||
| 183 | return true; | 186 | return true; |
| 184 | } | 187 | } |
| 185 | 188 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 78ec7db64ba5..d6314ddf57b5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c | |||
| @@ -478,6 +478,11 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, | |||
| 478 | if (vif->type != NL80211_IFTYPE_STATION) | 478 | if (vif->type != NL80211_IFTYPE_STATION) |
| 479 | return; | 479 | return; |
| 480 | 480 | ||
| 481 | if (sig == 0) { | ||
| 482 | IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); | ||
| 483 | return; | ||
| 484 | } | ||
| 485 | |||
| 481 | mvmvif->bf_data.ave_beacon_signal = sig; | 486 | mvmvif->bf_data.ave_beacon_signal = sig; |
| 482 | 487 | ||
| 483 | /* BT Coex */ | 488 | /* BT Coex */ |
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 74e1c86289dc..1075a213bd6a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
| @@ -319,7 +319,7 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, | |||
| 319 | struct iwl_device_cmd *cmd) | 319 | struct iwl_device_cmd *cmd) |
| 320 | { | 320 | { |
| 321 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | 321 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
| 322 | struct iwl_scan_complete_notif *notif = (void *)pkt->data; | 322 | struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; |
| 323 | 323 | ||
| 324 | IWL_DEBUG_SCAN(mvm, | 324 | IWL_DEBUG_SCAN(mvm, |
| 325 | "Scan offload iteration complete: status=0x%x scanned channels=%d\n", | 325 | "Scan offload iteration complete: status=0x%x scanned channels=%d\n", |
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 01996c9d98a7..376b84e54ad7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /****************************************************************************** | 1 | /****************************************************************************** |
| 2 | * | 2 | * |
| 3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. | 3 | * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. |
| 4 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 4 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 5 | * | 5 | * |
| 6 | * Portions of this file are derived from the ipw3945 project, as well | 6 | * Portions of this file are derived from the ipw3945 project, as well |
| 7 | * as portions of the ieee80211 subsystem header files. | 7 | * as portions of the ieee80211 subsystem header files. |
| @@ -320,7 +320,7 @@ struct iwl_trans_pcie { | |||
| 320 | 320 | ||
| 321 | /*protect hw register */ | 321 | /*protect hw register */ |
| 322 | spinlock_t reg_lock; | 322 | spinlock_t reg_lock; |
| 323 | bool cmd_in_flight; | 323 | bool cmd_hold_nic_awake; |
| 324 | bool ref_cmd_in_flight; | 324 | bool ref_cmd_in_flight; |
| 325 | 325 | ||
| 326 | /* protect ref counter */ | 326 | /* protect ref counter */ |
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 2de8fbfe4edf..dc179094e6a0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
| @@ -5,8 +5,8 @@ | |||
| 5 | * | 5 | * |
| 6 | * GPL LICENSE SUMMARY | 6 | * GPL LICENSE SUMMARY |
| 7 | * | 7 | * |
| 8 | * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 10 | * | 10 | * |
| 11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
| 12 | * it under the terms of version 2 of the GNU General Public License as | 12 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -31,8 +31,8 @@ | |||
| 31 | * | 31 | * |
| 32 | * BSD LICENSE | 32 | * BSD LICENSE |
| 33 | * | 33 | * |
| 34 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. | 34 | * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. |
| 35 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 35 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 36 | * All rights reserved. | 36 | * All rights reserved. |
| 37 | * | 37 | * |
| 38 | * Redistribution and use in source and binary forms, with or without | 38 | * Redistribution and use in source and binary forms, with or without |
| @@ -104,7 +104,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) | |||
| 104 | static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) | 104 | static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) |
| 105 | { | 105 | { |
| 106 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 106 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
| 107 | struct page *page; | 107 | struct page *page = NULL; |
| 108 | dma_addr_t phys; | 108 | dma_addr_t phys; |
| 109 | u32 size; | 109 | u32 size; |
| 110 | u8 power; | 110 | u8 power; |
| @@ -131,6 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) | |||
| 131 | DMA_FROM_DEVICE); | 131 | DMA_FROM_DEVICE); |
| 132 | if (dma_mapping_error(trans->dev, phys)) { | 132 | if (dma_mapping_error(trans->dev, phys)) { |
| 133 | __free_pages(page, order); | 133 | __free_pages(page, order); |
| 134 | page = NULL; | ||
| 134 | continue; | 135 | continue; |
| 135 | } | 136 | } |
| 136 | IWL_INFO(trans, | 137 | IWL_INFO(trans, |
| @@ -1020,7 +1021,7 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) | |||
| 1020 | iwl_pcie_tx_start(trans, scd_addr); | 1021 | iwl_pcie_tx_start(trans, scd_addr); |
| 1021 | } | 1022 | } |
| 1022 | 1023 | ||
| 1023 | static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | 1024 | static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) |
| 1024 | { | 1025 | { |
| 1025 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 1026 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
| 1026 | bool hw_rfkill, was_hw_rfkill; | 1027 | bool hw_rfkill, was_hw_rfkill; |
| @@ -1048,9 +1049,11 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | |||
| 1048 | iwl_pcie_rx_stop(trans); | 1049 | iwl_pcie_rx_stop(trans); |
| 1049 | 1050 | ||
| 1050 | /* Power-down device's busmaster DMA clocks */ | 1051 | /* Power-down device's busmaster DMA clocks */ |
| 1051 | iwl_write_prph(trans, APMG_CLK_DIS_REG, | 1052 | if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { |
| 1052 | APMG_CLK_VAL_DMA_CLK_RQT); | 1053 | iwl_write_prph(trans, APMG_CLK_DIS_REG, |
| 1053 | udelay(5); | 1054 | APMG_CLK_VAL_DMA_CLK_RQT); |
| 1055 | udelay(5); | ||
| 1056 | } | ||
| 1054 | } | 1057 | } |
| 1055 | 1058 | ||
| 1056 | /* Make sure (redundant) we've released our request to stay awake */ | 1059 | /* Make sure (redundant) we've released our request to stay awake */ |
| @@ -1115,7 +1118,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) | |||
| 1115 | void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) | 1118 | void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) |
| 1116 | { | 1119 | { |
| 1117 | if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) | 1120 | if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) |
| 1118 | iwl_trans_pcie_stop_device(trans); | 1121 | iwl_trans_pcie_stop_device(trans, true); |
| 1119 | } | 1122 | } |
| 1120 | 1123 | ||
| 1121 | static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) | 1124 | static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) |
| @@ -1200,7 +1203,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, | |||
| 1200 | return 0; | 1203 | return 0; |
| 1201 | } | 1204 | } |
| 1202 | 1205 | ||
| 1203 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | 1206 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) |
| 1204 | { | 1207 | { |
| 1205 | bool hw_rfkill; | 1208 | bool hw_rfkill; |
| 1206 | int err; | 1209 | int err; |
| @@ -1369,7 +1372,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, | |||
| 1369 | 1372 | ||
| 1370 | spin_lock_irqsave(&trans_pcie->reg_lock, *flags); | 1373 | spin_lock_irqsave(&trans_pcie->reg_lock, *flags); |
| 1371 | 1374 | ||
| 1372 | if (trans_pcie->cmd_in_flight) | 1375 | if (trans_pcie->cmd_hold_nic_awake) |
| 1373 | goto out; | 1376 | goto out; |
| 1374 | 1377 | ||
| 1375 | /* this bit wakes up the NIC */ | 1378 | /* this bit wakes up the NIC */ |
| @@ -1435,7 +1438,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, | |||
| 1435 | */ | 1438 | */ |
| 1436 | __acquire(&trans_pcie->reg_lock); | 1439 | __acquire(&trans_pcie->reg_lock); |
| 1437 | 1440 | ||
| 1438 | if (trans_pcie->cmd_in_flight) | 1441 | if (trans_pcie->cmd_hold_nic_awake) |
| 1439 | goto out; | 1442 | goto out; |
| 1440 | 1443 | ||
| 1441 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, | 1444 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, |
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 06952aadfd7b..5ef8044c2ea3 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
| @@ -1039,18 +1039,14 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, | |||
| 1039 | iwl_trans_pcie_ref(trans); | 1039 | iwl_trans_pcie_ref(trans); |
| 1040 | } | 1040 | } |
| 1041 | 1041 | ||
| 1042 | if (trans_pcie->cmd_in_flight) | ||
| 1043 | return 0; | ||
| 1044 | |||
| 1045 | trans_pcie->cmd_in_flight = true; | ||
| 1046 | |||
| 1047 | /* | 1042 | /* |
| 1048 | * wake up the NIC to make sure that the firmware will see the host | 1043 | * wake up the NIC to make sure that the firmware will see the host |
| 1049 | * command - we will let the NIC sleep once all the host commands | 1044 | * command - we will let the NIC sleep once all the host commands |
| 1050 | * returned. This needs to be done only on NICs that have | 1045 | * returned. This needs to be done only on NICs that have |
| 1051 | * apmg_wake_up_wa set. | 1046 | * apmg_wake_up_wa set. |
| 1052 | */ | 1047 | */ |
| 1053 | if (trans->cfg->base_params->apmg_wake_up_wa) { | 1048 | if (trans->cfg->base_params->apmg_wake_up_wa && |
| 1049 | !trans_pcie->cmd_hold_nic_awake) { | ||
| 1054 | __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, | 1050 | __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, |
| 1055 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | 1051 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
| 1056 | if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) | 1052 | if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) |
| @@ -1064,10 +1060,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, | |||
| 1064 | if (ret < 0) { | 1060 | if (ret < 0) { |
| 1065 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, | 1061 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, |
| 1066 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | 1062 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
| 1067 | trans_pcie->cmd_in_flight = false; | ||
| 1068 | IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); | 1063 | IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); |
| 1069 | return -EIO; | 1064 | return -EIO; |
| 1070 | } | 1065 | } |
| 1066 | trans_pcie->cmd_hold_nic_awake = true; | ||
| 1071 | } | 1067 | } |
| 1072 | 1068 | ||
| 1073 | return 0; | 1069 | return 0; |
| @@ -1085,15 +1081,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) | |||
| 1085 | iwl_trans_pcie_unref(trans); | 1081 | iwl_trans_pcie_unref(trans); |
| 1086 | } | 1082 | } |
| 1087 | 1083 | ||
| 1088 | if (WARN_ON(!trans_pcie->cmd_in_flight)) | 1084 | if (trans->cfg->base_params->apmg_wake_up_wa) { |
| 1089 | return 0; | 1085 | if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) |
| 1090 | 1086 | return 0; | |
| 1091 | trans_pcie->cmd_in_flight = false; | ||
| 1092 | 1087 | ||
| 1093 | if (trans->cfg->base_params->apmg_wake_up_wa) | 1088 | trans_pcie->cmd_hold_nic_awake = false; |
| 1094 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, | 1089 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, |
| 1095 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | 1090 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
| 1096 | 1091 | } | |
| 1097 | return 0; | 1092 | return 0; |
| 1098 | } | 1093 | } |
| 1099 | 1094 | ||
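Editor's note: the rename and restructuring above keep the flag meaningful only on devices that need the apmg wake-up workaround: it now records "the NIC is being held awake for a host command" and is set and cleared on the same workaround-only path. Condensed from the two hunks above, as a sketch:

        /* set side, in iwl_pcie_set_cmd_in_flight(): */
        if (trans->cfg->base_params->apmg_wake_up_wa &&
            !trans_pcie->cmd_hold_nic_awake) {
                __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                /* poll until the MAC is awake, bail out on timeout */
                trans_pcie->cmd_hold_nic_awake = true;
        }

        /* clear side, in iwl_pcie_clear_cmd_in_flight(): */
        if (trans->cfg->base_params->apmg_wake_up_wa &&
            trans_pcie->cmd_hold_nic_awake) {
                trans_pcie->cmd_hold_nic_awake = false;
                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                           CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        }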
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index f0188c83c79f..2721cf89fb16 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c | |||
| @@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request, | |||
| 126 | 126 | ||
| 127 | do { | 127 | do { |
| 128 | status = usb_control_msg(udev, pipe, request, reqtype, value, | 128 | status = usb_control_msg(udev, pipe, request, reqtype, value, |
| 129 | index, pdata, len, 0); /*max. timeout*/ | 129 | index, pdata, len, 1000); |
| 130 | if (status < 0) { | 130 | if (status < 0) { |
| 131 | /* firmware download is checksumed, don't retry */ | 131 | /* firmware download is checksumed, don't retry */ |
| 132 | if ((value >= FW_8192C_START_ADDRESS && | 132 | if ((value >= FW_8192C_START_ADDRESS && |
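Editor's note: the change above replaces an unbounded wait with a 1000 ms timeout. usb_control_msg() takes its last argument in milliseconds, and 0 means "wait forever", which can hang the caller if the device never answers. A minimal sketch of the bounded call; the request parameters are placeholders:

        /* Bounded vendor request: give up after one second instead of
         * blocking indefinitely on an unresponsive device.
         */
        status = usb_control_msg(udev, pipe, request, reqtype,
                                 value, index, pdata, len, 1000 /* ms */);
        if (status < 0)
                dev_warn(&udev->dev, "vendor request failed: %d\n", status);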
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 4de46aa61d95..0d2594395ffb 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, | |||
| 1250 | netdev_err(queue->vif->dev, | 1250 | netdev_err(queue->vif->dev, |
| 1251 | "txreq.offset: %x, size: %u, end: %lu\n", | 1251 | "txreq.offset: %x, size: %u, end: %lu\n", |
| 1252 | txreq.offset, txreq.size, | 1252 | txreq.offset, txreq.size, |
| 1253 | (txreq.offset&~PAGE_MASK) + txreq.size); | 1253 | (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size); |
| 1254 | xenvif_fatal_tx_err(queue->vif); | 1254 | xenvif_fatal_tx_err(queue->vif); |
| 1255 | break; | 1255 | break; |
| 1256 | } | 1256 | } |
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 3d8dbf5f2d39..968787abf78d 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c | |||
| @@ -34,6 +34,8 @@ struct backend_info { | |||
| 34 | enum xenbus_state frontend_state; | 34 | enum xenbus_state frontend_state; |
| 35 | struct xenbus_watch hotplug_status_watch; | 35 | struct xenbus_watch hotplug_status_watch; |
| 36 | u8 have_hotplug_status_watch:1; | 36 | u8 have_hotplug_status_watch:1; |
| 37 | |||
| 38 | const char *hotplug_script; | ||
| 37 | }; | 39 | }; |
| 38 | 40 | ||
| 39 | static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); | 41 | static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); |
| @@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev) | |||
| 238 | xenvif_free(be->vif); | 240 | xenvif_free(be->vif); |
| 239 | be->vif = NULL; | 241 | be->vif = NULL; |
| 240 | } | 242 | } |
| 243 | kfree(be->hotplug_script); | ||
| 241 | kfree(be); | 244 | kfree(be); |
| 242 | dev_set_drvdata(&dev->dev, NULL); | 245 | dev_set_drvdata(&dev->dev, NULL); |
| 243 | return 0; | 246 | return 0; |
| @@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev, | |||
| 255 | struct xenbus_transaction xbt; | 258 | struct xenbus_transaction xbt; |
| 256 | int err; | 259 | int err; |
| 257 | int sg; | 260 | int sg; |
| 261 | const char *script; | ||
| 258 | struct backend_info *be = kzalloc(sizeof(struct backend_info), | 262 | struct backend_info *be = kzalloc(sizeof(struct backend_info), |
| 259 | GFP_KERNEL); | 263 | GFP_KERNEL); |
| 260 | if (!be) { | 264 | if (!be) { |
| @@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev, | |||
| 347 | if (err) | 351 | if (err) |
| 348 | pr_debug("Error writing multi-queue-max-queues\n"); | 352 | pr_debug("Error writing multi-queue-max-queues\n"); |
| 349 | 353 | ||
| 354 | script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL); | ||
| 355 | if (IS_ERR(script)) { | ||
| 356 | err = PTR_ERR(script); | ||
| 357 | xenbus_dev_fatal(dev, err, "reading script"); | ||
| 358 | goto fail; | ||
| 359 | } | ||
| 360 | |||
| 361 | be->hotplug_script = script; | ||
| 362 | |||
| 350 | err = xenbus_switch_state(dev, XenbusStateInitWait); | 363 | err = xenbus_switch_state(dev, XenbusStateInitWait); |
| 351 | if (err) | 364 | if (err) |
| 352 | goto fail; | 365 | goto fail; |
| @@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev, | |||
| 379 | struct kobj_uevent_env *env) | 392 | struct kobj_uevent_env *env) |
| 380 | { | 393 | { |
| 381 | struct backend_info *be = dev_get_drvdata(&xdev->dev); | 394 | struct backend_info *be = dev_get_drvdata(&xdev->dev); |
| 382 | char *val; | ||
| 383 | 395 | ||
| 384 | val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); | 396 | if (!be) |
| 385 | if (IS_ERR(val)) { | 397 | return 0; |
| 386 | int err = PTR_ERR(val); | ||
| 387 | xenbus_dev_fatal(xdev, err, "reading script"); | ||
| 388 | return err; | ||
| 389 | } else { | ||
| 390 | if (add_uevent_var(env, "script=%s", val)) { | ||
| 391 | kfree(val); | ||
| 392 | return -ENOMEM; | ||
| 393 | } | ||
| 394 | kfree(val); | ||
| 395 | } | ||
| 396 | 398 | ||
| 397 | if (!be || !be->vif) | 399 | if (add_uevent_var(env, "script=%s", be->hotplug_script)) |
| 400 | return -ENOMEM; | ||
| 401 | |||
| 402 | if (!be->vif) | ||
| 398 | return 0; | 403 | return 0; |
| 399 | 404 | ||
| 400 | return add_uevent_var(env, "vif=%s", be->vif->dev->name); | 405 | return add_uevent_var(env, "vif=%s", be->vif->dev->name); |
| @@ -793,6 +798,7 @@ static void connect(struct backend_info *be) | |||
| 793 | goto err; | 798 | goto err; |
| 794 | } | 799 | } |
| 795 | 800 | ||
| 801 | queue->credit_bytes = credit_bytes; | ||
| 796 | queue->remaining_credit = credit_bytes; | 802 | queue->remaining_credit = credit_bytes; |
| 797 | queue->credit_usec = credit_usec; | 803 | queue->credit_usec = credit_usec; |
| 798 | 804 | ||
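Editor's note: the xenbus changes above read the "script" node once at probe time and cache it in backend_info, so netback_uevent() no longer needs a xenstore round-trip (which may not be possible at uevent time) and can answer even before be->vif exists. The ownership pairing, condensed from the probe/remove hunks above as a sketch:

        /* probe: read once and take ownership of the string */
        script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
        if (IS_ERR(script))
                return PTR_ERR(script);           /* error handling condensed */
        be->hotplug_script = script;

        /* remove: release it together with the backend_info */
        kfree(be->hotplug_script);
        kfree(be);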
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 3f45afd4382e..e031c943286e 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -1698,6 +1698,7 @@ static void xennet_destroy_queues(struct netfront_info *info) | |||
| 1698 | 1698 | ||
| 1699 | if (netif_running(info->netdev)) | 1699 | if (netif_running(info->netdev)) |
| 1700 | napi_disable(&queue->napi); | 1700 | napi_disable(&queue->napi); |
| 1701 | del_timer_sync(&queue->rx_refill_timer); | ||
| 1701 | netif_napi_del(&queue->napi); | 1702 | netif_napi_del(&queue->napi); |
| 1702 | } | 1703 | } |
| 1703 | 1704 | ||
| @@ -2102,9 +2103,6 @@ static const struct attribute_group xennet_dev_group = { | |||
| 2102 | static int xennet_remove(struct xenbus_device *dev) | 2103 | static int xennet_remove(struct xenbus_device *dev) |
| 2103 | { | 2104 | { |
| 2104 | struct netfront_info *info = dev_get_drvdata(&dev->dev); | 2105 | struct netfront_info *info = dev_get_drvdata(&dev->dev); |
| 2105 | unsigned int num_queues = info->netdev->real_num_tx_queues; | ||
| 2106 | struct netfront_queue *queue = NULL; | ||
| 2107 | unsigned int i = 0; | ||
| 2108 | 2106 | ||
| 2109 | dev_dbg(&dev->dev, "%s\n", dev->nodename); | 2107 | dev_dbg(&dev->dev, "%s\n", dev->nodename); |
| 2110 | 2108 | ||
| @@ -2112,16 +2110,7 @@ static int xennet_remove(struct xenbus_device *dev) | |||
| 2112 | 2110 | ||
| 2113 | unregister_netdev(info->netdev); | 2111 | unregister_netdev(info->netdev); |
| 2114 | 2112 | ||
| 2115 | for (i = 0; i < num_queues; ++i) { | 2113 | xennet_destroy_queues(info); |
| 2116 | queue = &info->queues[i]; | ||
| 2117 | del_timer_sync(&queue->rx_refill_timer); | ||
| 2118 | } | ||
| 2119 | |||
| 2120 | if (num_queues) { | ||
| 2121 | kfree(info->queues); | ||
| 2122 | info->queues = NULL; | ||
| 2123 | } | ||
| 2124 | |||
| 2125 | xennet_free_netdev(info->netdev); | 2114 | xennet_free_netdev(info->netdev); |
| 2126 | 2115 | ||
| 2127 | return 0; | 2116 | return 0; |
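Editor's note: with del_timer_sync() folded into xennet_destroy_queues(), the remove path and the reconnect path tear queues down the same way. The ordering matters, presumably because the refill timer's callback can schedule NAPI work, so it is stopped after polling is disabled and before the NAPI context is deleted. A per-queue sketch, assuming xennet_destroy_queues() also frees info->queues (not visible in this hunk):

        if (netif_running(info->netdev))
                napi_disable(&queue->napi);        /* stop polling                  */
        del_timer_sync(&queue->rx_refill_timer);   /* callback may schedule NAPI    */
        netif_napi_del(&queue->napi);              /* safe: nothing rearms it now   */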
