Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/device_pm.c | 2
-rw-r--r--  drivers/ata/ahci_mtk.c | 6
-rw-r--r--  drivers/ata/ahci_qoriq.c | 12
-rw-r--r--  drivers/ata/libata-core.c | 12
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 16
-rw-r--r--  drivers/base/power/main.c | 15
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 44
-rw-r--r--  drivers/char/ipmi/ipmi_si_parisc.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_si_pci.c | 7
-rw-r--r--  drivers/dma/at_hdmac.c | 4
-rw-r--r--  drivers/dma/dma-jz4740.c | 4
-rw-r--r--  drivers/dma/dmatest.c | 55
-rw-r--r--  drivers/dma/fsl-edma.c | 28
-rw-r--r--  drivers/dma/ioat/init.c | 2
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 63
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 52
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 4
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 8
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 5
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_irq.c | 1
-rw-r--r--  drivers/hwtracing/stm/ftrace.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-cht-wc.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-stm32.h | 3
-rw-r--r--  drivers/i2c/busses/i2c-stm32f4.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 3
-rw-r--r--  drivers/infiniband/core/cma.c | 2
-rw-r--r--  drivers/infiniband/core/device.c | 2
-rw-r--r--  drivers/infiniband/core/iwcm.c | 2
-rw-r--r--  drivers/infiniband/core/nldev.c | 2
-rw-r--r--  drivers/infiniband/core/security.c | 7
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 5
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 22
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 26
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 1
-rw-r--r--  drivers/md/dm-bufio.c | 8
-rw-r--r--  drivers/md/dm-cache-target.c | 12
-rw-r--r--  drivers/md/dm-mpath.c | 67
-rw-r--r--  drivers/md/dm-snap.c | 48
-rw-r--r--  drivers/md/dm-table.c | 5
-rw-r--r--  drivers/md/dm-thin.c | 22
-rw-r--r--  drivers/memory/omap-gpmc.c | 5
-rw-r--r--  drivers/misc/eeprom/at24.c | 26
-rw-r--r--  drivers/misc/pti.c | 2
-rw-r--r--  drivers/mmc/core/card.h | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 2
-rw-r--r--  drivers/mmc/core/quirks.h | 8
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 29
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 82
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 17
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 29
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 80
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/ver.h | 6
-rw-r--r--  drivers/net/ethernet/arc/emac_rockchip.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 18
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-phy.c | 7
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 27
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 10
-rw-r--r--  drivers/net/hippi/rrunner.c | 2
-rw-r--r--  drivers/net/phy/at803x.c | 4
-rw-r--r--  drivers/net/phy/marvell.c | 4
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/meson-gxl.c | 74
-rw-r--r--  drivers/net/phy/phy.c | 9
-rw-r--r--  drivers/net/phy/phy_device.c | 10
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/of/of_mdio.c | 3
-rw-r--r--  drivers/pci/host/pcie-rcar.c | 8
-rw-r--r--  drivers/pci/pci-driver.c | 2
-rw-r--r--  drivers/platform/x86/asus-wireless.c | 1
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 17
-rw-r--r--  drivers/platform/x86/dell-wmi.c | 2
-rw-r--r--  drivers/s390/net/qeth_core.h | 6
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 6
-rw-r--r--  drivers/s390/net/qeth_l3.h | 2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 36
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 75
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 8
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 10
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 2
-rw-r--r--  drivers/scsi/scsi_debugfs.c | 6
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 27
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/sd.c | 4
-rw-r--r--  drivers/staging/ccree/ssi_hash.c | 2
-rw-r--r--  drivers/staging/pi433/rf69.c | 2
-rw-r--r--  drivers/tee/optee/core.c | 11
-rw-r--r--  drivers/tee/optee/optee_private.h | 43
-rw-r--r--  drivers/tee/optee/rpc.c | 4
-rw-r--r--  drivers/tee/optee/supp.c | 375
-rw-r--r--  drivers/tee/tee_core.c | 32
-rw-r--r--  drivers/usb/core/config.c | 4
-rw-r--r--  drivers/usb/dwc2/core.h | 4
-rw-r--r--  drivers/usb/dwc2/gadget.c | 42
-rw-r--r--  drivers/usb/dwc2/params.c | 29
-rw-r--r--  drivers/usb/dwc3/dwc3-of-simple.c | 5
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/gadget/Kconfig | 4
-rw-r--r--  drivers/usb/gadget/legacy/Kconfig | 12
-rw-r--r--  drivers/usb/host/xhci-mem.c | 15
-rw-r--r--  drivers/usb/host/xhci-ring.c | 6
-rw-r--r--  drivers/usb/musb/da8xx.c | 10
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/usb/usbip/stub_rx.c | 51
-rw-r--r--  drivers/usb/usbip/stub_tx.c | 7
-rw-r--r--  drivers/usb/usbip/usbip_common.h | 1
-rw-r--r--  drivers/usb/usbip/vhci_sysfs.c | 25
-rw-r--r--  drivers/virtio/virtio_mmio.c | 43
-rw-r--r--  drivers/xen/Kconfig | 2
129 files changed, 1355 insertions, 809 deletions
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index e4ffaeec9ec2..a4c8ad98560d 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1138,7 +1138,7 @@ int acpi_subsys_thaw_noirq(struct device *dev)
 	 * skip all of the subsequent "thaw" callbacks for the device.
 	 */
 	if (dev_pm_smart_suspend_and_suspended(dev)) {
-		dev->power.direct_complete = true;
+		dev_pm_skip_next_resume_phases(dev);
 		return 0;
 	}
 
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index 80854f71559a..0ae6971c2a4c 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -1,5 +1,5 @@
 /*
- * MeidaTek AHCI SATA driver
+ * MediaTek AHCI SATA driver
  *
  * Copyright (c) 2017 MediaTek Inc.
  * Author: Ryder Lee <ryder.lee@mediatek.com>
@@ -25,7 +25,7 @@
 #include <linux/reset.h>
 #include "ahci.h"
 
-#define DRV_NAME "ahci"
+#define DRV_NAME "ahci-mtk"
 
 #define SYS_CFG 0x14
 #define SYS_CFG_SATA_MSK GENMASK(31, 30)
@@ -192,5 +192,5 @@ static struct platform_driver mtk_ahci_driver = {
 };
 module_platform_driver(mtk_ahci_driver);
 
-MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver");
+MODULE_DESCRIPTION("MediaTek SATA AHCI Driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index b6b0bf76dfc7..2685f28160f7 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -35,6 +35,8 @@
 
 /* port register default value */
 #define AHCI_PORT_PHY_1_CFG 0xa003fffe
+#define AHCI_PORT_PHY2_CFG 0x28184d1f
+#define AHCI_PORT_PHY3_CFG 0x0e081509
 #define AHCI_PORT_TRANS_CFG 0x08000029
 #define AHCI_PORT_AXICC_CFG 0x3fffffff
 
@@ -183,6 +185,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
 		       qpriv->ecc_addr);
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		if (qpriv->is_dmacoherent)
 			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -190,6 +194,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 
 	case AHCI_LS2080A:
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		if (qpriv->is_dmacoherent)
 			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -201,6 +207,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
 		       qpriv->ecc_addr);
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		if (qpriv->is_dmacoherent)
 			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -212,6 +220,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 		writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
 		       qpriv->ecc_addr);
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		if (qpriv->is_dmacoherent)
 			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -219,6 +229,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 
 	case AHCI_LS2088A:
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
+		writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		if (qpriv->is_dmacoherent)
 			writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2a882929de4a..8193b38a1cae 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3082,13 +3082,19 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
 	bit = fls(mask) - 1;
 	mask &= ~(1 << bit);
 
-	/* Mask off all speeds higher than or equal to the current
-	 * one. Force 1.5Gbps if current SPD is not available.
+	/*
+	 * Mask off all speeds higher than or equal to the current one. At
+	 * this point, if current SPD is not available and we previously
+	 * recorded the link speed from SStatus, the driver has already
+	 * masked off the highest bit so mask should already be 1 or 0.
+	 * Otherwise, we should not force 1.5Gbps on a link where we have
+	 * not previously recorded speed from SStatus. Just return in this
+	 * case.
 	 */
 	if (spd > 1)
 		mask &= (1 << (spd - 1)) - 1;
 	else
-		mask &= 1;
+		return -EINVAL;
 
 	/* were we already at the bottom? */
 	if (!mask)
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index ffd8d33c6e0f..6db2e34bd52f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -82,7 +82,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
  * is issued to the device. However, if the controller clock is 133MHz,
  * the following tables must be used.
  */
-static struct pdc2027x_pio_timing {
+static const struct pdc2027x_pio_timing {
 	u8 value0, value1, value2;
 } pdc2027x_pio_timing_tbl[] = {
 	{ 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
@@ -92,7 +92,7 @@ static struct pdc2027x_pio_timing {
 	{ 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
 };
 
-static struct pdc2027x_mdma_timing {
+static const struct pdc2027x_mdma_timing {
 	u8 value0, value1;
 } pdc2027x_mdma_timing_tbl[] = {
 	{ 0xdf, 0x5f }, /* MDMA mode 0 */
@@ -100,7 +100,7 @@ static struct pdc2027x_mdma_timing {
 	{ 0x69, 0x25 }, /* MDMA mode 2 */
 };
 
-static struct pdc2027x_udma_timing {
+static const struct pdc2027x_udma_timing {
 	u8 value0, value1, value2;
 } pdc2027x_udma_timing_tbl[] = {
 	{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
@@ -649,7 +649,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
  * @host: target ATA host
  * @board_idx: board identifier
  */
-static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
+static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
 {
 	long pll_clock;
 
@@ -665,8 +665,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
 
 	/* Adjust PLL control register */
 	pdc_adjust_pll(host, pll_clock, board_idx);
-
-	return 0;
 }
 
 /**
@@ -753,8 +751,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
 	//pci_enable_intx(pdev);
 
 	/* initialize adapter */
-	if (pdc_hardware_init(host, board_idx) != 0)
-		return -EIO;
+	pdc_hardware_init(host, board_idx);
 
 	pci_set_master(pdev);
 	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
@@ -778,8 +775,7 @@ static int pdc2027x_reinit_one(struct pci_dev *pdev)
 	else
 		board_idx = PDC_UDMA_133;
 
-	if (pdc_hardware_init(host, board_idx))
-		return -EIO;
+	pdc_hardware_init(host, board_idx);
 
 	ata_host_resume(host);
 	return 0;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index db2f04415927..08744b572af6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -526,6 +526,21 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
 /*------------------------- Resume routines -------------------------*/
 
 /**
+ * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
+ * @dev: Target device.
+ *
+ * Make the core skip the "early resume" and "resume" phases for @dev.
+ *
+ * This function can be called by middle-layer code during the "noirq" phase of
+ * system resume if necessary, but not by device drivers.
+ */
+void dev_pm_skip_next_resume_phases(struct device *dev)
+{
+	dev->power.is_late_suspended = false;
+	dev->power.is_suspended = false;
+}
+
+/**
  * device_resume_noirq - Execute a "noirq resume" callback for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 779869ed32b1..71fad747c0c7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -199,6 +199,9 @@ struct smi_info {
 	/* The timer for this si. */
 	struct timer_list si_timer;
 
+	/* This flag is set, if the timer can be set */
+	bool timer_can_start;
+
 	/* This flag is set, if the timer is running (timer_pending() isn't enough) */
 	bool timer_running;
 
@@ -355,6 +358,8 @@ out:
 
 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 {
+	if (!smi_info->timer_can_start)
+		return;
 	smi_info->last_timeout_jiffies = jiffies;
 	mod_timer(&smi_info->si_timer, new_val);
 	smi_info->timer_running = true;
@@ -374,21 +379,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
 	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
 }
 
-static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+static void start_check_enables(struct smi_info *smi_info)
 {
 	unsigned char msg[2];
 
 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-	if (start_timer)
-		start_new_msg(smi_info, msg, 2);
-	else
-		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+	start_new_msg(smi_info, msg, 2);
 	smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+static void start_clear_flags(struct smi_info *smi_info)
 {
 	unsigned char msg[3];
 
@@ -397,10 +399,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
 	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
 	msg[2] = WDT_PRE_TIMEOUT_INT;
 
-	if (start_timer)
-		start_new_msg(smi_info, msg, 3);
-	else
-		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+	start_new_msg(smi_info, msg, 3);
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -435,11 +434,11 @@ static void start_getting_events(struct smi_info *smi_info)
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+static inline bool disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = true;
-		start_check_enables(smi_info, start_timer);
+		start_check_enables(smi_info);
 		return true;
 	}
 	return false;
@@ -449,7 +448,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = false;
-		start_check_enables(smi_info, true);
+		start_check_enables(smi_info);
 		return true;
 	}
 	return false;
@@ -467,7 +466,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
 
 	msg = ipmi_alloc_smi_msg();
 	if (!msg) {
-		if (!disable_si_irq(smi_info, true))
+		if (!disable_si_irq(smi_info))
 			smi_info->si_state = SI_NORMAL;
 	} else if (enable_si_irq(smi_info)) {
 		ipmi_free_smi_msg(msg);
@@ -483,7 +482,7 @@ retry:
 		/* Watchdog pre-timeout */
 		smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-		start_clear_flags(smi_info, true);
+		start_clear_flags(smi_info);
 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
 		if (smi_info->intf)
 			ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -866,7 +865,7 @@ restart:
 	 * disable and messages disabled.
 	 */
 	if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
-		start_check_enables(smi_info, true);
+		start_check_enables(smi_info);
 	} else {
 		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 		if (!smi_info->curr_msg)
@@ -1167,6 +1166,7 @@ static int smi_start_processing(void *send_info,
 
 	/* Set up the timer that drives the interface. */
 	timer_setup(&new_smi->si_timer, smi_timeout, 0);
+	new_smi->timer_can_start = true;
 	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
 	/* Try to claim any interrupts. */
@@ -1936,10 +1936,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
 	check_set_rcv_irq(smi_info);
 }
 
-static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
+static inline void stop_timer_and_thread(struct smi_info *smi_info)
 {
 	if (smi_info->thread != NULL)
 		kthread_stop(smi_info->thread);
+
+	smi_info->timer_can_start = false;
 	if (smi_info->timer_running)
 		del_timer_sync(&smi_info->si_timer);
 }
@@ -2152,7 +2154,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	 * Start clearing the flags before we enable interrupts or the
 	 * timer to avoid racing with the timer.
 	 */
-	start_clear_flags(new_smi, false);
+	start_clear_flags(new_smi);
 
 	/*
 	 * IRQ is defined to be set when non-zero. req_events will
@@ -2238,7 +2240,7 @@ out_err_remove_attrs:
 	dev_set_drvdata(new_smi->io.dev, NULL);
 
 out_err_stop_timer:
-	wait_for_timer_and_thread(new_smi);
+	stop_timer_and_thread(new_smi);
 
 out_err:
 	new_smi->interrupt_disabled = true;
@@ -2388,7 +2390,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
 	 */
 	if (to_clean->io.irq_cleanup)
 		to_clean->io.irq_cleanup(&to_clean->io);
-	wait_for_timer_and_thread(to_clean);
+	stop_timer_and_thread(to_clean);
 
 	/*
 	 * Timeouts are stopped, now make sure the interrupts are off
@@ -2400,7 +2402,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
 		schedule_timeout_uninterruptible(1);
 	}
 	if (to_clean->handlers)
-		disable_si_irq(to_clean, false);
+		disable_si_irq(to_clean);
 	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 090b073ab441..6b10f0e18a95 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -10,6 +10,8 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
 {
 	struct si_sm_io io;
 
+	memset(&io, 0, sizeof(io));
+
 	io.si_type = SI_KCS;
 	io.addr_source = SI_DEVICETREE;
 	io.addr_type = IPMI_MEM_ADDR_SPACE;
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 99771f5cad07..27dd11c49d21 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -103,10 +103,13 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
 	io.addr_source_cleanup = ipmi_pci_cleanup;
 	io.addr_source_data = pdev;
 
-	if (pci_resource_flags(pdev, 0) & IORESOURCE_IO)
+	if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
 		io.addr_type = IPMI_IO_ADDR_SPACE;
-	else
+		io.io_setup = ipmi_si_port_setup;
+	} else {
 		io.addr_type = IPMI_MEM_ADDR_SPACE;
+		io.io_setup = ipmi_si_mem_setup;
+	}
 	io.addr_data = pci_resource_start(pdev, 0);
 
 	io.regspacing = ipmi_pci_probe_regspacing(&io);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fbab271b3bf9..a861b5b4d443 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
 			 unsigned long flags)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
-	struct data_chunk *first = xt->sgl;
+	struct data_chunk *first;
 	struct at_desc *desc = NULL;
 	size_t xfer_count;
 	unsigned int dwidth;
@@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
 	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
 		return NULL;
 
+	first = xt->sgl;
+
 	dev_info(chan2dev(chan),
 		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
 		__func__, &xt->src_start, &xt->dst_start, xt->numf,
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index d50273fed715..afd5e10f8927 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
 	ret = dma_async_device_register(dd);
 	if (ret)
-		return ret;
+		goto err_clk;
 
 	irq = platform_get_irq(pdev, 0);
 	ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
@@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
 err_unregister:
 	dma_async_device_unregister(dd);
+err_clk:
+	clk_disable_unprepare(dmadev->clk);
 	return ret;
 }
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 47edc7fbf91f..ec5f9d2bc820 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
 #define PATTERN_COUNT_MASK 0x1f
 #define PATTERN_MEMSET_IDX 0x01
 
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+	bool done;
+	wait_queue_head_t *wait;
+};
+
 struct dmatest_thread {
 	struct list_head node;
 	struct dmatest_info *info;
@@ -165,6 +171,8 @@ struct dmatest_thread {
 	u8 **dsts;
 	u8 **udsts;
 	enum dma_transaction_type type;
+	wait_queue_head_t done_wait;
+	struct dmatest_done test_done;
 	bool done;
 };
 
@@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
 	return error_count;
 }
 
-/* poor man's completion - we want to use wait_event_freezable() on it */
-struct dmatest_done {
-	bool done;
-	wait_queue_head_t *wait;
-};
 
 static void dmatest_callback(void *arg)
 {
 	struct dmatest_done *done = arg;
-
-	done->done = true;
-	wake_up_all(done->wait);
+	struct dmatest_thread *thread =
+		container_of(arg, struct dmatest_thread, done_wait);
+	if (!thread->done) {
+		done->done = true;
+		wake_up_all(done->wait);
+	} else {
+		/*
+		 * If thread->done, it means that this callback occurred
+		 * after the parent thread has cleaned up. This can
+		 * happen in the case that driver doesn't implement
+		 * the terminate_all() functionality and a dma operation
+		 * did not occur within the timeout period
+		 */
+		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
+	}
 }
 
 static unsigned int min_odd(unsigned int x, unsigned int y)
@@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
  */
 static int dmatest_func(void *data)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
 	struct dmatest_thread *thread = data;
-	struct dmatest_done done = { .wait = &done_wait };
+	struct dmatest_done *done = &thread->test_done;
 	struct dmatest_info *info;
 	struct dmatest_params *params;
 	struct dma_chan *chan;
@@ -673,9 +687,9 @@ static int dmatest_func(void *data)
 			continue;
 		}
 
-		done.done = false;
+		done->done = false;
 		tx->callback = dmatest_callback;
-		tx->callback_param = &done;
+		tx->callback_param = done;
 		cookie = tx->tx_submit(tx);
 
 		if (dma_submit_error(cookie)) {
@@ -688,21 +702,12 @@ static int dmatest_func(void *data)
 		}
 		dma_async_issue_pending(chan);
 
-		wait_event_freezable_timeout(done_wait, done.done,
+		wait_event_freezable_timeout(thread->done_wait, done->done,
					     msecs_to_jiffies(params->timeout));
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-		if (!done.done) {
-			/*
-			 * We're leaving the timed out dma operation with
-			 * dangling pointer to done_wait. To make this
-			 * correct, we'll need to allocate wait_done for
-			 * each test iteration and perform "who's gonna
-			 * free it this time?" dancing. For now, just
-			 * leave it dangling.
-			 */
-			WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+		if (!done->done) {
 			dmaengine_unmap_put(um);
 			result("test timed out", total_tests, src_off, dst_off,
 			       len, 0);
@@ -789,7 +794,7 @@ err_thread_type:
 		dmatest_KBs(runtime, total_len), ret);
 
 	/* terminate all transfers on specified channels */
-	if (ret)
+	if (ret || failed_tests)
 		dmaengine_terminate_all(chan);
 
 	thread->done = true;
@@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
 		thread->info = info;
 		thread->chan = dtc->chan;
 		thread->type = type;
+		thread->test_done.wait = &thread->done_wait;
+		init_waitqueue_head(&thread->done_wait);
 		smp_wmb();
 		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
 				dma_chan_name(chan), op, i);
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 6775f2c74e25..c7568869284e 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -863,11 +863,11 @@ static void fsl_edma_irq_exit(
 	}
 }
 
-static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma)
+static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
 {
 	int i;
 
-	for (i = 0; i < DMAMUX_NR; i++)
+	for (i = 0; i < nr_clocks; i++)
 		clk_disable_unprepare(fsl_edma->muxclk[i]);
 }
 
@@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev)
 
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
 		fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(fsl_edma->muxbase[i]))
+		if (IS_ERR(fsl_edma->muxbase[i])) {
+			/* on error: disable all previously enabled clks */
+			fsl_disable_clocks(fsl_edma, i);
 			return PTR_ERR(fsl_edma->muxbase[i]);
+		}
 
 		sprintf(clkname, "dmamux%d", i);
 		fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
 		if (IS_ERR(fsl_edma->muxclk[i])) {
 			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+			/* on error: disable all previously enabled clks */
+			fsl_disable_clocks(fsl_edma, i);
 			return PTR_ERR(fsl_edma->muxclk[i]);
 		}
 
 		ret = clk_prepare_enable(fsl_edma->muxclk[i]);
-		if (ret) {
-			/* disable only clks which were enabled on error */
-			for (; i >= 0; i--)
-				clk_disable_unprepare(fsl_edma->muxclk[i]);
-
-			dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
-			return ret;
-		}
+		if (ret)
+			/* on error: disable all previously enabled clks */
+			fsl_disable_clocks(fsl_edma, i);
 
 	}
 
@@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	if (ret) {
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA engine. (%d)\n", ret);
-		fsl_disable_clocks(fsl_edma);
+		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 		return ret;
 	}
 
@@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev,
 			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
 		dma_async_device_unregister(&fsl_edma->dma_dev);
-		fsl_disable_clocks(fsl_edma);
+		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 		return ret;
 	}
 
@@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
 	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
 	of_dma_controller_free(np);
 	dma_async_device_unregister(&fsl_edma->dma_dev);
-	fsl_disable_clocks(fsl_edma);
+	fsl_disable_clocks(fsl_edma, DMAMUX_NR);
 
 	return 0;
 }
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 2f31d3d0caa6..7792a9186f9c 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
 	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
 		dev_err(dev, "Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
-		goto free_resources;
+		goto unmap_dma;
 	}
 
 unmap_dma:
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 482014137953..9ae236036e32 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -152,14 +152,23 @@ static void drm_connector_free(struct kref *kref)
 	connector->funcs->destroy(connector);
 }
 
-static void drm_connector_free_work_fn(struct work_struct *work)
+void drm_connector_free_work_fn(struct work_struct *work)
 {
-	struct drm_connector *connector =
-		container_of(work, struct drm_connector, free_work);
-	struct drm_device *dev = connector->dev;
+	struct drm_connector *connector, *n;
+	struct drm_device *dev =
+		container_of(work, struct drm_device, mode_config.connector_free_work);
+	struct drm_mode_config *config = &dev->mode_config;
+	unsigned long flags;
+	struct llist_node *freed;
 
-	drm_mode_object_unregister(dev, &connector->base);
-	connector->funcs->destroy(connector);
+	spin_lock_irqsave(&config->connector_list_lock, flags);
+	freed = llist_del_all(&config->connector_free_list);
+	spin_unlock_irqrestore(&config->connector_list_lock, flags);
+
+	llist_for_each_entry_safe(connector, n, freed, free_node) {
+		drm_mode_object_unregister(dev, &connector->base);
+		connector->funcs->destroy(connector);
+	}
 }
 
 /**
@@ -191,8 +200,6 @@ int drm_connector_init(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	INIT_WORK(&connector->free_work, drm_connector_free_work_fn);
-
 	connector->base.properties = &connector->properties;
 	connector->dev = dev;
 	connector->funcs = funcs;
@@ -547,10 +554,17 @@ EXPORT_SYMBOL(drm_connector_list_iter_begin);
  * actually release the connector when dropping our final reference.
  */
 static void
-drm_connector_put_safe(struct drm_connector *conn)
+__drm_connector_put_safe(struct drm_connector *conn)
 {
-	if (refcount_dec_and_test(&conn->base.refcount.refcount))
-		schedule_work(&conn->free_work);
+	struct drm_mode_config *config = &conn->dev->mode_config;
+
+	lockdep_assert_held(&config->connector_list_lock);
+
+	if (!refcount_dec_and_test(&conn->base.refcount.refcount))
+		return;
+
+	llist_add(&conn->free_node, &config->connector_free_list);
+	schedule_work(&config->connector_free_work);
 }
 
 /**
@@ -582,10 +596,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
 
 		/* loop until it's not a zombie connector */
 	} while (!kref_get_unless_zero(&iter->conn->base.refcount));
-	spin_unlock_irqrestore(&config->connector_list_lock, flags);
 
 	if (old_conn)
-		drm_connector_put_safe(old_conn);
+		__drm_connector_put_safe(old_conn);
+	spin_unlock_irqrestore(&config->connector_list_lock, flags);
 
 	return iter->conn;
 }
@@ -602,9 +616,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next);
 */
 void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
 {
+	struct drm_mode_config *config = &iter->dev->mode_config;
+	unsigned long flags;
+
 	iter->dev = NULL;
-	if (iter->conn)
-		drm_connector_put_safe(iter->conn);
+	if (iter->conn) {
+		spin_lock_irqsave(&config->connector_list_lock, flags);
+		__drm_connector_put_safe(iter->conn);
+		spin_unlock_irqrestore(&config->connector_list_lock, flags);
+	}
 	lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -1231,6 +1251,19 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 	if (edid)
 		size = EDID_LENGTH * (1 + edid->extensions);
 
+	/* Set the display info, using edid if available, otherwise
+	 * reseting the values to defaults. This duplicates the work
+	 * done in drm_add_edid_modes, but that function is not
+	 * consistently called before this one in all drivers and the
+	 * computation is cheap enough that it seems better to
+	 * duplicate it rather than attempt to ensure some arbitrary
+	 * ordering of calls.
+	 */
+	if (edid)
+		drm_add_display_info(connector, edid);
+	else
+		drm_reset_display_info(connector);
+
 	drm_object_property_set_value(&connector->base,
 				      dev->mode_config.non_desktop_property,
 				      connector->display_info.non_desktop);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 9ebb8841778c..af00f42ba269 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -142,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
 			    uint64_t value);
 int drm_connector_create_standard_properties(struct drm_device *dev);
 const char *drm_get_connector_force_name(enum drm_connector_force force);
+void drm_connector_free_work_fn(struct work_struct *work);
 
 /* IOCTL */
 int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5dfe14763871..cb487148359a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
 *
 * Returns true if @vendor is in @edid, false otherwise
 */
-static bool edid_vendor(struct edid *edid, const char *vendor)
+static bool edid_vendor(const struct edid *edid, const char *vendor)
 {
 	char edid_vendor[3];
 
@@ -1749,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor)
 *
 * This tells subsequent routines what fixes they need to apply.
 */
-static u32 edid_get_quirks(struct edid *edid)
+static u32 edid_get_quirks(const struct edid *edid)
 {
 	const struct edid_quirk *quirk;
 	int i;
@@ -2813,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 /*
 * Search EDID for CEA extension block.
 */
-static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
+static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
 {
 	u8 *edid_ext = NULL;
 	int i;
@@ -2835,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
 	return edid_ext;
 }
 
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(const struct edid *edid)
 {
 	return drm_find_edid_extension(edid, CEA_EXT);
 }
 
-static u8 *drm_find_displayid_extension(struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid)
 {
 	return drm_find_edid_extension(edid, DISPLAYID_EXT);
 }
@@ -4363,7 +4363,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
 }
 
 static void drm_parse_cea_ext(struct drm_connector *connector,
-			      struct edid *edid)
+			      const struct edid *edid)
 {
 	struct drm_display_info *info = &connector->display_info;
 	const u8 *edid_ext;
@@ -4397,11 +4397,33 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
 	}
 }
 
-static void drm_add_display_info(struct drm_connector *connector,
-				 struct edid *edid, u32 quirks)
+/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
+ * all of the values which would have been set from EDID
+ */
+void
+drm_reset_display_info(struct drm_connector *connector)
 {
 	struct drm_display_info *info = &connector->display_info;
 
+	info->width_mm = 0;
+	info->height_mm = 0;
+
+	info->bpc = 0;
+	info->color_formats = 0;
+	info->cea_rev = 0;
+	info->max_tmds_clock = 0;
+	info->dvi_dual = false;
+
+	info->non_desktop = 0;
+}
+EXPORT_SYMBOL_GPL(drm_reset_display_info);
+
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
+{
+	struct drm_display_info *info = &connector->display_info;
+
+	u32 quirks = edid_get_quirks(edid);
+
 	info->width_mm = edid->width_cm * 10;
 	info->height_mm = edid->height_cm * 10;
 
@@ -4414,11 +4436,13 @@ static void drm_add_display_info(struct drm_connector *connector,
 
 	info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
 
+	DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
+
 	if (edid->revision < 3)
-		return;
+		return quirks;
 
 	if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
-		return;
+		return quirks;
 
 	drm_parse_cea_ext(connector, edid);
 
@@ -4438,7 +4462,7 @@ static void drm_add_display_info(struct drm_connector *connector,
 
 	/* Only defined for 1.4 with digital displays */
 	if (edid->revision < 4)
-		return;
+		return quirks;
 
 	switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
 	case DRM_EDID_DIGITAL_DEPTH_6:
@@ -4473,7 +4497,9 @@ static void drm_add_display_info(struct drm_connector *connector,
 		info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
 	if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
 		info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+	return quirks;
 }
+EXPORT_SYMBOL_GPL(drm_add_display_info);
 
 static int validate_displayid(u8 *displayid, int length, int idx)
 {
@@ -4627,14 +4653,12 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 		return 0;
 	}
 
-	quirks = edid_get_quirks(edid);
-
 	/*
 	 * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
 	 * To avoid multiple parsing of same block, lets parse that map
 	 * from sink info, before parsing CEA modes.
 	 */
-	drm_add_display_info(connector, edid, quirks);
+	quirks = drm_add_display_info(connector, edid);
 
 	/*
 	 * EDID spec says modes should be preferred in this order:
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d1eb56a1eff4..59849f02e2ad 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -254,10 +254,10 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
 	return lessee;
 
 out_lessee:
-	drm_master_put(&lessee);
-
 	mutex_unlock(&dev->mode_config.idr_mutex);
 
+	drm_master_put(&lessee);
+
 	return ERR_PTR(error);
 }
 
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 61a1c8ea74bc..c3c79ee6119e 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
 */
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
+	struct drm_mm *mm = old->mm;
+
 	DRM_MM_BUG_ON(!old->allocated);
 
 	*new = *old;
 
 	list_replace(&old->node_list, &new->node_list);
-	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
+	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
 
 	if (drm_mm_hole_follows(old)) {
 		list_replace(&old->hole_stack, &new->hole_stack);
 		rb_replace_node(&old->rb_hole_size,
 				&new->rb_hole_size,
-				&old->mm->holes_size);
+				&mm->holes_size);
 		rb_replace_node(&old->rb_hole_addr,
 				&new->rb_hole_addr,
-				&old->mm->holes_addr);
+				&mm->holes_addr);
 	}
 
 	old->allocated = false;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index cc78b3d9e5e4..256de7313612 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -382,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev)
 	ida_init(&dev->mode_config.connector_ida);
 	spin_lock_init(&dev->mode_config.connector_list_lock);
 
+	init_llist_head(&dev->mode_config.connector_free_list);
+	INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
+
 	drm_mode_create_standard_properties(dev);
 
 	/* Just to be sure */
@@ -432,7 +435,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 	}
 	drm_connector_list_iter_end(&conn_iter);
 	/* connector_iter drops references in a work item. */
-	flush_scheduled_work();
+	flush_work(&dev->mode_config.connector_free_work);
 	if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
 		drm_connector_list_iter_begin(dev, &conn_iter);
 		drm_for_each_connector_iter(connector, &conn_iter)
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6c32c89a83a9..638540943c61 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -888,8 +888,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
 	/* If we got force-completed because of GPU reset rather than
 	 * through our IRQ handler, signal the fence now.
 	 */
-	if (exec->fence)
+	if (exec->fence) {
 		dma_fence_signal(exec->fence);
+		dma_fence_put(exec->fence);
+	}
 
 	if (exec->bo) {
 		for (i = 0; i < exec->bo_count; i++) {
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 61b2e5377993..26eddbb62893 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
139 list_move_tail(&exec->head, &vc4->job_done_list); 139 list_move_tail(&exec->head, &vc4->job_done_list);
140 if (exec->fence) { 140 if (exec->fence) {
141 dma_fence_signal_locked(exec->fence); 141 dma_fence_signal_locked(exec->fence);
142 dma_fence_put(exec->fence);
142 exec->fence = NULL; 143 exec->fence = NULL;
143 } 144 }
144 vc4_submit_next_render_job(dev); 145 vc4_submit_next_render_job(dev);
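Annotation: both vc4 hunks pair dma_fence_signal() with dma_fence_put(), so the reference held on the fence installed at submit time is dropped once the fence is signalled, and the pointer is cleared to guard against running the completion path twice. A compressed sketch of that pairing; 'my_job' is a hypothetical container for the fence:

#include <linux/dma-fence.h>

struct my_job {
	struct dma_fence *fence;
};

static void my_job_complete(struct my_job *job)
{
	if (job->fence) {
		dma_fence_signal(job->fence);	/* wake any waiters */
		dma_fence_put(job->fence);	/* drop the submit-time reference */
		job->fence = NULL;		/* prevent double completion */
	}
}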
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
index bd126a7c6da2..7da75644c750 100644
--- a/drivers/hwtracing/stm/ftrace.c
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -42,9 +42,11 @@ static struct stm_ftrace {
42 * @len: length of the data packet 42 * @len: length of the data packet
43 */ 43 */
44static void notrace 44static void notrace
45stm_ftrace_write(const void *buf, unsigned int len) 45stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
46{ 46{
47 stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len); 47 struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
48
49 stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
48} 50}
49 51
50static int stm_ftrace_link(struct stm_source_data *data) 52static int stm_ftrace_link(struct stm_source_data *data)
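Annotation: the stm/ftrace change passes the struct trace_export pointer into the write callback and recovers the enclosing stm_ftrace with container_of(), instead of relying on a single global instance. The recovery step can be shown in plain userspace C; the container_of definition below mirrors the kernel macro and the struct names are made up:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct export { int id; };
struct source { const char *name; struct export exp; };

static void write_cb(struct export *e, const void *buf, unsigned int len)
{
	struct source *s = container_of(e, struct source, exp);

	printf("%s: %u bytes via export %d\n", s->name, len, e->id);
}

int main(void)
{
	struct source s = { .name = "stm", .exp = { .id = 1 } };

	write_cb(&s.exp, "x", 1);	/* prints: stm: 1 bytes via export 1 */
	return 0;
}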
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 0d05dadb2dc5..44cffad43701 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -379,7 +379,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
379 return 0; 379 return 0;
380} 380}
381 381
382static struct platform_device_id cht_wc_i2c_adap_id_table[] = { 382static const struct platform_device_id cht_wc_i2c_adap_id_table[] = {
383 { .name = "cht_wcove_ext_chgr" }, 383 { .name = "cht_wcove_ext_chgr" },
384 {}, 384 {},
385}; 385};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 174579d32e5f..462948e2c535 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -983,7 +983,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
983 983
984 if (adapdata->smba) { 984 if (adapdata->smba) {
985 i2c_del_adapter(adap); 985 i2c_del_adapter(adap);
986 if (adapdata->port == (0 << 1)) { 986 if (adapdata->port == (0 << piix4_port_shift_sb800)) {
987 release_region(adapdata->smba, SMBIOSIZE); 987 release_region(adapdata->smba, SMBIOSIZE);
988 if (adapdata->sb800_main) 988 if (adapdata->sb800_main)
989 release_region(SB800_PIIX4_SMB_IDX, 2); 989 release_region(SB800_PIIX4_SMB_IDX, 2);
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index dab51761f8c5..d4f9cef251ac 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,10 +1,11 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * i2c-stm32.h 3 * i2c-stm32.h
3 * 4 *
4 * Copyright (C) M'boumba Cedric Madianga 2017 5 * Copyright (C) M'boumba Cedric Madianga 2017
6 * Copyright (C) STMicroelectronics 2017
5 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> 7 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
6 * 8 *
7 * License terms: GNU General Public License (GPL), version 2
8 */ 9 */
9 10
10#ifndef _I2C_STM32_H 11#ifndef _I2C_STM32_H
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 4ec108496f15..47c8d00de53f 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Driver for STMicroelectronics STM32 I2C controller 3 * Driver for STMicroelectronics STM32 I2C controller
3 * 4 *
@@ -6,11 +7,11 @@
6 * http://www.st.com/resource/en/reference_manual/DM00031020.pdf 7 * http://www.st.com/resource/en/reference_manual/DM00031020.pdf
7 * 8 *
8 * Copyright (C) M'boumba Cedric Madianga 2016 9 * Copyright (C) M'boumba Cedric Madianga 2016
10 * Copyright (C) STMicroelectronics 2017
9 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> 11 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
10 * 12 *
11 * This driver is based on i2c-st.c 13 * This driver is based on i2c-st.c
12 * 14 *
13 * License terms: GNU General Public License (GPL), version 2
14 */ 15 */
15 16
16#include <linux/clk.h> 17#include <linux/clk.h>
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index d4a6e9c2e9aa..b445b3bb0bb1 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Driver for STMicroelectronics STM32F7 I2C controller 3 * Driver for STMicroelectronics STM32F7 I2C controller
3 * 4 *
@@ -7,11 +8,11 @@
7 * http://www.st.com/resource/en/reference_manual/dm00124865.pdf 8 * http://www.st.com/resource/en/reference_manual/dm00124865.pdf
8 * 9 *
9 * Copyright (C) M'boumba Cedric Madianga 2017 10 * Copyright (C) M'boumba Cedric Madianga 2017
11 * Copyright (C) STMicroelectronics 2017
10 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> 12 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
11 * 13 *
12 * This driver is based on i2c-stm32f4.c 14 * This driver is based on i2c-stm32f4.c
13 * 15 *
14 * License terms: GNU General Public License (GPL), version 2
15 */ 16 */
16#include <linux/clk.h> 17#include <linux/clk.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f6983357145d..6294a7001d33 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4458,7 +4458,7 @@ out:
4458 return skb->len; 4458 return skb->len;
4459} 4459}
4460 4460
4461static const struct rdma_nl_cbs cma_cb_table[] = { 4461static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
4462 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats}, 4462 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
4463}; 4463};
4464 4464
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5e1be4949d5f..30914f3baa5f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1146,7 +1146,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
1146} 1146}
1147EXPORT_SYMBOL(ib_get_net_dev_by_params); 1147EXPORT_SYMBOL(ib_get_net_dev_by_params);
1148 1148
1149static const struct rdma_nl_cbs ibnl_ls_cb_table[] = { 1149static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
1150 [RDMA_NL_LS_OP_RESOLVE] = { 1150 [RDMA_NL_LS_OP_RESOLVE] = {
1151 .doit = ib_nl_handle_resolve_resp, 1151 .doit = ib_nl_handle_resolve_resp,
1152 .flags = RDMA_NL_ADMIN_PERM, 1152 .flags = RDMA_NL_ADMIN_PERM,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index e9e189ec7502..5d676cff41f4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
80} 80}
81EXPORT_SYMBOL(iwcm_reject_msg); 81EXPORT_SYMBOL(iwcm_reject_msg);
82 82
83static struct rdma_nl_cbs iwcm_nl_cb_table[] = { 83static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
84 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, 84 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
85 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, 85 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
86 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, 86 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 2fae850a3eff..9a05245a1acf 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -303,7 +303,7 @@ out: cb->args[0] = idx;
303 return skb->len; 303 return skb->len;
304} 304}
305 305
306static const struct rdma_nl_cbs nldev_cb_table[] = { 306static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
307 [RDMA_NLDEV_CMD_GET] = { 307 [RDMA_NLDEV_CMD_GET] = {
308 .doit = nldev_get_doit, 308 .doit = nldev_get_doit,
309 .dump = nldev_get_dumpit, 309 .dump = nldev_get_dumpit,
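Annotation: the rdma_nl_cbs hunks (cma, device, iwcm and nldev) add an explicit bound such as [RDMA_NLDEV_NUM_OPS] to tables built with designated initializers. Without the bound, the array's size is only the highest initialized index plus one, so a lookup by a larger opcode would read past the end. A tiny illustration with invented opcodes:

enum { OP_GET, OP_SET, OP_DEL, NUM_OPS };

struct cb { int (*doit)(void); int (*dump)(void); };

/* Explicit bound: sizeof(table)/sizeof(table[0]) == NUM_OPS even though
 * only OP_GET is populated; OP_SET and OP_DEL are zero-filled entries
 * rather than out-of-bounds memory. */
static const struct cb table[NUM_OPS] = {
	[OP_GET] = { 0 },
};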
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index a337386652b0..feafdb961c48 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -739,8 +739,11 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
739 if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) 739 if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
740 return 0; 740 return 0;
741 741
742 if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) 742 if (map->agent.qp->qp_type == IB_QPT_SMI) {
743 return -EACCES; 743 if (!map->agent.smp_allowed)
744 return -EACCES;
745 return 0;
746 }
744 747
745 return ib_security_pkey_access(map->agent.device, 748 return ib_security_pkey_access(map->agent.device,
746 map->agent.port_num, 749 map->agent.port_num,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 16d55710b116..d0202bb176a4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file,
1971 goto release_qp; 1971 goto release_qp;
1972 } 1972 }
1973 1973
1974 if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
1975 !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
1976 ret = -EINVAL;
1977 goto release_qp;
1978 }
1979
1974 attr->qp_state = cmd->base.qp_state; 1980 attr->qp_state = cmd->base.qp_state;
1975 attr->cur_qp_state = cmd->base.cur_qp_state; 1981 attr->cur_qp_state = cmd->base.cur_qp_state;
1976 attr->path_mtu = cmd->base.path_mtu; 1982 attr->path_mtu = cmd->base.path_mtu;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ea55e95cd2c5..b7bfc536e00f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,6 +395,11 @@ next_cqe:
395 395
396static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) 396static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
397{ 397{
398 if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
399 WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
400 return 0;
401 }
402
398 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) 403 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
399 return 0; 404 return 0;
400 405
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5ee7fe433136..38bddd02a943 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -868,7 +868,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
868 868
869 qhp = to_c4iw_qp(ibqp); 869 qhp = to_c4iw_qp(ibqp);
870 spin_lock_irqsave(&qhp->lock, flag); 870 spin_lock_irqsave(&qhp->lock, flag);
871 if (t4_wq_in_error(&qhp->wq)) { 871
872 /*
873 * If the qp has been flushed, then just insert a special
874 * drain cqe.
875 */
876 if (qhp->wq.flushed) {
872 spin_unlock_irqrestore(&qhp->lock, flag); 877 spin_unlock_irqrestore(&qhp->lock, flag);
873 complete_sq_drain_wr(qhp, wr); 878 complete_sq_drain_wr(qhp, wr);
874 return err; 879 return err;
@@ -1011,7 +1016,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1011 1016
1012 qhp = to_c4iw_qp(ibqp); 1017 qhp = to_c4iw_qp(ibqp);
1013 spin_lock_irqsave(&qhp->lock, flag); 1018 spin_lock_irqsave(&qhp->lock, flag);
1014 if (t4_wq_in_error(&qhp->wq)) { 1019
1020 /*
1021 * If the qp has been flushed, then just insert a special
1022 * drain cqe.
1023 */
1024 if (qhp->wq.flushed) {
1015 spin_unlock_irqrestore(&qhp->lock, flag); 1025 spin_unlock_irqrestore(&qhp->lock, flag);
1016 complete_rq_drain_wr(qhp, wr); 1026 complete_rq_drain_wr(qhp, wr);
1017 return err; 1027 return err;
@@ -1285,21 +1295,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1285 spin_unlock_irqrestore(&rchp->lock, flag); 1295 spin_unlock_irqrestore(&rchp->lock, flag);
1286 1296
1287 if (schp == rchp) { 1297 if (schp == rchp) {
1288 if (t4_clear_cq_armed(&rchp->cq) && 1298 if ((rq_flushed || sq_flushed) &&
1289 (rq_flushed || sq_flushed)) { 1299 t4_clear_cq_armed(&rchp->cq)) {
1290 spin_lock_irqsave(&rchp->comp_handler_lock, flag); 1300 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1291 (*rchp->ibcq.comp_handler)(&rchp->ibcq, 1301 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1292 rchp->ibcq.cq_context); 1302 rchp->ibcq.cq_context);
1293 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); 1303 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1294 } 1304 }
1295 } else { 1305 } else {
1296 if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { 1306 if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
1297 spin_lock_irqsave(&rchp->comp_handler_lock, flag); 1307 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1298 (*rchp->ibcq.comp_handler)(&rchp->ibcq, 1308 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1299 rchp->ibcq.cq_context); 1309 rchp->ibcq.cq_context);
1300 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); 1310 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1301 } 1311 }
1302 if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { 1312 if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
1303 spin_lock_irqsave(&schp->comp_handler_lock, flag); 1313 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1304 (*schp->ibcq.comp_handler)(&schp->ibcq, 1314 (*schp->ibcq.comp_handler)(&schp->ibcq,
1305 schp->ibcq.cq_context); 1315 schp->ibcq.cq_context);
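Annotation: the __flush_qp() hunk swaps the operands of each &&, testing the flushed flags before t4_clear_cq_armed() so the armed bit is only consumed when there is actually a completion to report. The ordering matters because && short-circuits and the right-hand operand has a side effect; a standalone illustration with a made-up test_and_clear helper:

static int armed = 1;

static int test_and_clear_armed(void)
{
	int was_armed = armed;

	armed = 0;			/* side effect: consumes the armed state */
	return was_armed;
}

static void maybe_notify(int flushed)
{
	/* new order: if nothing was flushed, the armed state is left untouched */
	if (flushed && test_and_clear_armed())
		/* raise the completion handler */;
}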
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 013049bcdb53..caf490ab24c8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
666 return (-EOPNOTSUPP); 666 return (-EOPNOTSUPP);
667 } 667 }
668 668
669 if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
670 MLX4_IB_RX_HASH_DST_IPV4 |
671 MLX4_IB_RX_HASH_SRC_IPV6 |
672 MLX4_IB_RX_HASH_DST_IPV6 |
673 MLX4_IB_RX_HASH_SRC_PORT_TCP |
674 MLX4_IB_RX_HASH_DST_PORT_TCP |
675 MLX4_IB_RX_HASH_SRC_PORT_UDP |
676 MLX4_IB_RX_HASH_DST_PORT_UDP)) {
677 pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
678 ucmd->rx_hash_fields_mask);
679 return (-EOPNOTSUPP);
680 }
681
669 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) && 682 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
670 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { 683 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
671 rss_ctx->flags = MLX4_RSS_IPV4; 684 rss_ctx->flags = MLX4_RSS_IPV4;
@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
691 return (-EOPNOTSUPP); 704 return (-EOPNOTSUPP);
692 } 705 }
693 706
694 if (rss_ctx->flags & MLX4_RSS_IPV4) { 707 if (rss_ctx->flags & MLX4_RSS_IPV4)
695 rss_ctx->flags |= MLX4_RSS_UDP_IPV4; 708 rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
696 } else if (rss_ctx->flags & MLX4_RSS_IPV6) { 709 if (rss_ctx->flags & MLX4_RSS_IPV6)
697 rss_ctx->flags |= MLX4_RSS_UDP_IPV6; 710 rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
698 } else { 711 if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
699 pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n"); 712 pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
700 return (-EOPNOTSUPP); 713 return (-EOPNOTSUPP);
701 } 714 }
@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
707 720
708 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) && 721 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
709 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { 722 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
710 if (rss_ctx->flags & MLX4_RSS_IPV4) { 723 if (rss_ctx->flags & MLX4_RSS_IPV4)
711 rss_ctx->flags |= MLX4_RSS_TCP_IPV4; 724 rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
712 } else if (rss_ctx->flags & MLX4_RSS_IPV6) { 725 if (rss_ctx->flags & MLX4_RSS_IPV6)
713 rss_ctx->flags |= MLX4_RSS_TCP_IPV6; 726 rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
714 } else { 727 if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
715 pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n"); 728 pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
716 return (-EOPNOTSUPP); 729 return (-EOPNOTSUPP);
717 } 730 }
718
719 } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) || 731 } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
720 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { 732 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
721 pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n"); 733 pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
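Annotation: the first mlx4 hunk rejects any rx_hash_fields_mask bit the driver does not understand before interpreting the rest of the mask. The guard is simply "requested & ~supported"; a hedged sketch with invented field bits (the real MLX4_IB_RX_HASH_* values differ):

#define HASH_SRC_IPV4	0x01ull
#define HASH_DST_IPV4	0x02ull
#define HASH_SRC_PORT	0x04ull
#define HASH_DST_PORT	0x08ull
#define HASH_SUPPORTED	(HASH_SRC_IPV4 | HASH_DST_IPV4 | \
			 HASH_SRC_PORT | HASH_DST_PORT)

static int validate_hash_mask(unsigned long long requested)
{
	if (requested & ~HASH_SUPPORTED)
		return -1;	/* unsupported bit set; the driver returns -EOPNOTSUPP */
	return 0;
}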
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 87f4bd99cdf7..2c13123bfd69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1145 noio_flag = memalloc_noio_save(); 1145 noio_flag = memalloc_noio_save();
1146 p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring)); 1146 p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
1147 if (!p->tx_ring) { 1147 if (!p->tx_ring) {
1148 memalloc_noio_restore(noio_flag);
1148 ret = -ENOMEM; 1149 ret = -ENOMEM;
1149 goto err_tx; 1150 goto err_tx;
1150 } 1151 }
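Annotation: the ipoib fix restores the saved allocation flags on the early-error path, keeping memalloc_noio_save() and memalloc_noio_restore() balanced on every exit from the NOIO section. A condensed sketch of the balanced bracket, assuming the kernel sched/mm, vmalloc and errno headers; 'ring' and the size are placeholders:

#include <linux/errno.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

static int alloc_ring_noio(void **ring, size_t size)
{
	unsigned int noio_flag = memalloc_noio_save();

	*ring = vzalloc(size);
	if (!*ring) {
		memalloc_noio_restore(noio_flag);	/* the line the fix adds */
		return -ENOMEM;
	}

	memalloc_noio_restore(noio_flag);
	return 0;
}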
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b8ac591aaaa7..c546b567f3b5 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1611 int l; 1611 int l;
1612 struct dm_buffer *b, *tmp; 1612 struct dm_buffer *b, *tmp;
1613 unsigned long freed = 0; 1613 unsigned long freed = 0;
1614 unsigned long count = nr_to_scan; 1614 unsigned long count = c->n_buffers[LIST_CLEAN] +
1615 c->n_buffers[LIST_DIRTY];
1615 unsigned long retain_target = get_retain_buffers(c); 1616 unsigned long retain_target = get_retain_buffers(c);
1616 1617
1617 for (l = 0; l < LIST_SIZE; l++) { 1618 for (l = 0; l < LIST_SIZE; l++) {
@@ -1647,8 +1648,11 @@ static unsigned long
1647dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 1648dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1648{ 1649{
1649 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); 1650 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1651 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1652 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1653 unsigned long retain_target = get_retain_buffers(c);
1650 1654
1651 return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]); 1655 return (count < retain_target) ? 0 : (count - retain_target);
1652} 1656}
1653 1657
1654/* 1658/*
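Annotation: after the dm-bufio change the shrinker's count callback reports only the buffers above the retain threshold, clamped at zero, and __scan() derives its count from the per-list counters instead of nr_to_scan. The clamp itself is a one-liner, shown here as a free-standing helper with hypothetical parameters:

static unsigned long shrinkable_buffers(unsigned long clean, unsigned long dirty,
					unsigned long retain_target)
{
	unsigned long count = clean + dirty;

	/* never report buffers below the retain target, and never underflow */
	return (count < retain_target) ? 0 : (count - retain_target);
}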
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index cf23a14f9c6a..47407e43b96a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void)
3472{ 3472{
3473 int r; 3473 int r;
3474 3474
3475 r = dm_register_target(&cache_target);
3476 if (r) {
3477 DMERR("cache target registration failed: %d", r);
3478 return r;
3479 }
3480
3481 migration_cache = KMEM_CACHE(dm_cache_migration, 0); 3475 migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3482 if (!migration_cache) { 3476 if (!migration_cache) {
3483 dm_unregister_target(&cache_target); 3477 dm_unregister_target(&cache_target);
3484 return -ENOMEM; 3478 return -ENOMEM;
3485 } 3479 }
3486 3480
3481 r = dm_register_target(&cache_target);
3482 if (r) {
3483 DMERR("cache target registration failed: %d", r);
3484 return r;
3485 }
3486
3487 return 0; 3487 return 0;
3488} 3488}
3489 3489
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c8faa2b85842..f7810cc869ac 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -458,6 +458,38 @@ do { \
458} while (0) 458} while (0)
459 459
460/* 460/*
461 * Check whether bios must be queued in the device-mapper core rather
462 * than here in the target.
463 *
464 * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
465 * the same value then we are not between multipath_presuspend()
466 * and multipath_resume() calls and we have no need to check
467 * for the DMF_NOFLUSH_SUSPENDING flag.
468 */
469static bool __must_push_back(struct multipath *m, unsigned long flags)
470{
471 return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
472 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
473 dm_noflush_suspending(m->ti));
474}
475
476/*
477 * Following functions use READ_ONCE to get atomic access to
478 * all m->flags to avoid taking spinlock
479 */
480static bool must_push_back_rq(struct multipath *m)
481{
482 unsigned long flags = READ_ONCE(m->flags);
483 return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
484}
485
486static bool must_push_back_bio(struct multipath *m)
487{
488 unsigned long flags = READ_ONCE(m->flags);
489 return __must_push_back(m, flags);
490}
491
492/*
461 * Map cloned requests (request-based multipath) 493 * Map cloned requests (request-based multipath)
462 */ 494 */
463static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, 495static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
@@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
478 pgpath = choose_pgpath(m, nr_bytes); 510 pgpath = choose_pgpath(m, nr_bytes);
479 511
480 if (!pgpath) { 512 if (!pgpath) {
481 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 513 if (must_push_back_rq(m))
482 return DM_MAPIO_DELAY_REQUEUE; 514 return DM_MAPIO_DELAY_REQUEUE;
483 dm_report_EIO(m); /* Failed */ 515 dm_report_EIO(m); /* Failed */
484 return DM_MAPIO_KILL; 516 return DM_MAPIO_KILL;
@@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
553 } 585 }
554 586
555 if (!pgpath) { 587 if (!pgpath) {
556 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 588 if (must_push_back_bio(m))
557 return DM_MAPIO_REQUEUE; 589 return DM_MAPIO_REQUEUE;
558 dm_report_EIO(m); 590 dm_report_EIO(m);
559 return DM_MAPIO_KILL; 591 return DM_MAPIO_KILL;
@@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
651 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, 683 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
652 (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) || 684 (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
653 (!save_old_value && queue_if_no_path)); 685 (!save_old_value && queue_if_no_path));
654 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, 686 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
655 queue_if_no_path || dm_noflush_suspending(m->ti));
656 spin_unlock_irqrestore(&m->lock, flags); 687 spin_unlock_irqrestore(&m->lock, flags);
657 688
658 if (!queue_if_no_path) { 689 if (!queue_if_no_path) {
@@ -1486,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
1486 fail_path(pgpath); 1517 fail_path(pgpath);
1487 1518
1488 if (atomic_read(&m->nr_valid_paths) == 0 && 1519 if (atomic_read(&m->nr_valid_paths) == 0 &&
1489 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1520 !must_push_back_rq(m)) {
1490 if (error == BLK_STS_IOERR) 1521 if (error == BLK_STS_IOERR)
1491 dm_report_EIO(m); 1522 dm_report_EIO(m);
1492 /* complete with the original error */ 1523 /* complete with the original error */
@@ -1521,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1521 1552
1522 if (atomic_read(&m->nr_valid_paths) == 0 && 1553 if (atomic_read(&m->nr_valid_paths) == 0 &&
1523 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1554 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1524 dm_report_EIO(m); 1555 if (must_push_back_bio(m)) {
1525 *error = BLK_STS_IOERR; 1556 r = DM_ENDIO_REQUEUE;
1557 } else {
1558 dm_report_EIO(m);
1559 *error = BLK_STS_IOERR;
1560 }
1526 goto done; 1561 goto done;
1527 } 1562 }
1528 1563
@@ -1957,13 +1992,6 @@ static int __init dm_multipath_init(void)
1957{ 1992{
1958 int r; 1993 int r;
1959 1994
1960 r = dm_register_target(&multipath_target);
1961 if (r < 0) {
1962 DMERR("request-based register failed %d", r);
1963 r = -EINVAL;
1964 goto bad_register_target;
1965 }
1966
1967 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); 1995 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1968 if (!kmultipathd) { 1996 if (!kmultipathd) {
1969 DMERR("failed to create workqueue kmpathd"); 1997 DMERR("failed to create workqueue kmpathd");
@@ -1985,13 +2013,20 @@ static int __init dm_multipath_init(void)
1985 goto bad_alloc_kmpath_handlerd; 2013 goto bad_alloc_kmpath_handlerd;
1986 } 2014 }
1987 2015
2016 r = dm_register_target(&multipath_target);
2017 if (r < 0) {
2018 DMERR("request-based register failed %d", r);
2019 r = -EINVAL;
2020 goto bad_register_target;
2021 }
2022
1988 return 0; 2023 return 0;
1989 2024
2025bad_register_target:
2026 destroy_workqueue(kmpath_handlerd);
1990bad_alloc_kmpath_handlerd: 2027bad_alloc_kmpath_handlerd:
1991 destroy_workqueue(kmultipathd); 2028 destroy_workqueue(kmultipathd);
1992bad_alloc_kmultipathd: 2029bad_alloc_kmultipathd:
1993 dm_unregister_target(&multipath_target);
1994bad_register_target:
1995 return r; 2030 return r;
1996} 2031}
1997 2032
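Annotation: the new dm-mpath helpers take a single READ_ONCE() snapshot of m->flags and run every test_bit() against that local copy, so two related bits are always observed from the same instant without taking the spinlock. A generic restatement of the idiom with a hypothetical object and flag bits:

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/types.h>

enum { FLAG_QUEUE, FLAG_SAVED };

struct obj { unsigned long flags; };

static bool flags_differ(struct obj *o)
{
	unsigned long snap = READ_ONCE(o->flags);	/* one lockless read... */

	/* ...then every bit test uses the same snapshot */
	return test_bit(FLAG_QUEUE, &snap) != test_bit(FLAG_SAVED, &snap);
}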
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1113b42e1eda..a0613bd8ed00 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void)
2411 return r; 2411 return r;
2412 } 2412 }
2413 2413
2414 r = dm_register_target(&snapshot_target);
2415 if (r < 0) {
2416 DMERR("snapshot target register failed %d", r);
2417 goto bad_register_snapshot_target;
2418 }
2419
2420 r = dm_register_target(&origin_target);
2421 if (r < 0) {
2422 DMERR("Origin target register failed %d", r);
2423 goto bad_register_origin_target;
2424 }
2425
2426 r = dm_register_target(&merge_target);
2427 if (r < 0) {
2428 DMERR("Merge target register failed %d", r);
2429 goto bad_register_merge_target;
2430 }
2431
2432 r = init_origin_hash(); 2414 r = init_origin_hash();
2433 if (r) { 2415 if (r) {
2434 DMERR("init_origin_hash failed."); 2416 DMERR("init_origin_hash failed.");
@@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void)
2449 goto bad_pending_cache; 2431 goto bad_pending_cache;
2450 } 2432 }
2451 2433
2434 r = dm_register_target(&snapshot_target);
2435 if (r < 0) {
2436 DMERR("snapshot target register failed %d", r);
2437 goto bad_register_snapshot_target;
2438 }
2439
2440 r = dm_register_target(&origin_target);
2441 if (r < 0) {
2442 DMERR("Origin target register failed %d", r);
2443 goto bad_register_origin_target;
2444 }
2445
2446 r = dm_register_target(&merge_target);
2447 if (r < 0) {
2448 DMERR("Merge target register failed %d", r);
2449 goto bad_register_merge_target;
2450 }
2451
2452 return 0; 2452 return 0;
2453 2453
2454bad_pending_cache:
2455 kmem_cache_destroy(exception_cache);
2456bad_exception_cache:
2457 exit_origin_hash();
2458bad_origin_hash:
2459 dm_unregister_target(&merge_target);
2460bad_register_merge_target: 2454bad_register_merge_target:
2461 dm_unregister_target(&origin_target); 2455 dm_unregister_target(&origin_target);
2462bad_register_origin_target: 2456bad_register_origin_target:
2463 dm_unregister_target(&snapshot_target); 2457 dm_unregister_target(&snapshot_target);
2464bad_register_snapshot_target: 2458bad_register_snapshot_target:
2459 kmem_cache_destroy(pending_cache);
2460bad_pending_cache:
2461 kmem_cache_destroy(exception_cache);
2462bad_exception_cache:
2463 exit_origin_hash();
2464bad_origin_hash:
2465 dm_exception_store_exit(); 2465 dm_exception_store_exit();
2466 2466
2467 return r; 2467 return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 88130b5d95f9..aaffd0c0ee9a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
453 453
454 refcount_set(&dd->count, 1); 454 refcount_set(&dd->count, 1);
455 list_add(&dd->list, &t->devices); 455 list_add(&dd->list, &t->devices);
456 goto out;
456 457
457 } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { 458 } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
458 r = upgrade_mode(dd, mode, t->md); 459 r = upgrade_mode(dd, mode, t->md);
459 if (r) 460 if (r)
460 return r; 461 return r;
461 refcount_inc(&dd->count);
462 } 462 }
463 463 refcount_inc(&dd->count);
464out:
464 *result = dd->dm_dev; 465 *result = dd->dm_dev;
465 return 0; 466 return 0;
466} 467}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 89e5dff9b4cf..f91d771fff4b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4355,30 +4355,28 @@ static struct target_type thin_target = {
4355 4355
4356static int __init dm_thin_init(void) 4356static int __init dm_thin_init(void)
4357{ 4357{
4358 int r; 4358 int r = -ENOMEM;
4359 4359
4360 pool_table_init(); 4360 pool_table_init();
4361 4361
4362 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4363 if (!_new_mapping_cache)
4364 return r;
4365
4362 r = dm_register_target(&thin_target); 4366 r = dm_register_target(&thin_target);
4363 if (r) 4367 if (r)
4364 return r; 4368 goto bad_new_mapping_cache;
4365 4369
4366 r = dm_register_target(&pool_target); 4370 r = dm_register_target(&pool_target);
4367 if (r) 4371 if (r)
4368 goto bad_pool_target; 4372 goto bad_thin_target;
4369
4370 r = -ENOMEM;
4371
4372 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4373 if (!_new_mapping_cache)
4374 goto bad_new_mapping_cache;
4375 4373
4376 return 0; 4374 return 0;
4377 4375
4378bad_new_mapping_cache: 4376bad_thin_target:
4379 dm_unregister_target(&pool_target);
4380bad_pool_target:
4381 dm_unregister_target(&thin_target); 4377 dm_unregister_target(&thin_target);
4378bad_new_mapping_cache:
4379 kmem_cache_destroy(_new_mapping_cache);
4382 4380
4383 return r; 4381 return r;
4384} 4382}
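Annotation: the dm-cache, dm-mpath, dm-snap and dm-thin hunks all apply the same rule to their module init paths: allocate every private resource first, call dm_register_target() last so the target never becomes visible before it can service requests, and unwind in the reverse order on failure. A stripped-down sketch of that ordering with invented helpers (create_cache, destroy_cache, my_target):

static int __init my_target_init(void)
{
	int r = -ENOMEM;

	cache = create_cache();			/* private resources first */
	if (!cache)
		return r;

	r = dm_register_target(&my_target);	/* become visible last */
	if (r) {
		DMERR("register failed %d", r);
		goto bad_register_target;
	}

	return 0;

bad_register_target:
	destroy_cache(cache);			/* reverse order of setup */
	return r;
}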
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index a385a35c7de9..0e30ee1c8677 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2077,8 +2077,9 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
2077 } else { 2077 } else {
2078 ret = of_property_read_u32(child, "bank-width", 2078 ret = of_property_read_u32(child, "bank-width",
2079 &gpmc_s.device_width); 2079 &gpmc_s.device_width);
2080 if (ret < 0) { 2080 if (ret < 0 && !gpmc_s.device_width) {
2081 dev_err(&pdev->dev, "%pOF has no 'bank-width' property\n", 2081 dev_err(&pdev->dev,
2082 "%pOF has no 'gpmc,device-width' property\n",
2082 child); 2083 child);
2083 goto err; 2084 goto err;
2084 } 2085 }
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 305a7a464d09..4d63ac8a82e0 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -562,7 +562,7 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
562static int at24_read(void *priv, unsigned int off, void *val, size_t count) 562static int at24_read(void *priv, unsigned int off, void *val, size_t count)
563{ 563{
564 struct at24_data *at24 = priv; 564 struct at24_data *at24 = priv;
565 struct i2c_client *client; 565 struct device *dev = &at24->client[0]->dev;
566 char *buf = val; 566 char *buf = val;
567 int ret; 567 int ret;
568 568
@@ -572,11 +572,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
572 if (off + count > at24->chip.byte_len) 572 if (off + count > at24->chip.byte_len)
573 return -EINVAL; 573 return -EINVAL;
574 574
575 client = at24_translate_offset(at24, &off); 575 ret = pm_runtime_get_sync(dev);
576
577 ret = pm_runtime_get_sync(&client->dev);
578 if (ret < 0) { 576 if (ret < 0) {
579 pm_runtime_put_noidle(&client->dev); 577 pm_runtime_put_noidle(dev);
580 return ret; 578 return ret;
581 } 579 }
582 580
@@ -592,7 +590,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
592 status = at24->read_func(at24, buf, off, count); 590 status = at24->read_func(at24, buf, off, count);
593 if (status < 0) { 591 if (status < 0) {
594 mutex_unlock(&at24->lock); 592 mutex_unlock(&at24->lock);
595 pm_runtime_put(&client->dev); 593 pm_runtime_put(dev);
596 return status; 594 return status;
597 } 595 }
598 buf += status; 596 buf += status;
@@ -602,7 +600,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
602 600
603 mutex_unlock(&at24->lock); 601 mutex_unlock(&at24->lock);
604 602
605 pm_runtime_put(&client->dev); 603 pm_runtime_put(dev);
606 604
607 return 0; 605 return 0;
608} 606}
@@ -610,7 +608,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
610static int at24_write(void *priv, unsigned int off, void *val, size_t count) 608static int at24_write(void *priv, unsigned int off, void *val, size_t count)
611{ 609{
612 struct at24_data *at24 = priv; 610 struct at24_data *at24 = priv;
613 struct i2c_client *client; 611 struct device *dev = &at24->client[0]->dev;
614 char *buf = val; 612 char *buf = val;
615 int ret; 613 int ret;
616 614
@@ -620,11 +618,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
620 if (off + count > at24->chip.byte_len) 618 if (off + count > at24->chip.byte_len)
621 return -EINVAL; 619 return -EINVAL;
622 620
623 client = at24_translate_offset(at24, &off); 621 ret = pm_runtime_get_sync(dev);
624
625 ret = pm_runtime_get_sync(&client->dev);
626 if (ret < 0) { 622 if (ret < 0) {
627 pm_runtime_put_noidle(&client->dev); 623 pm_runtime_put_noidle(dev);
628 return ret; 624 return ret;
629 } 625 }
630 626
@@ -640,7 +636,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
640 status = at24->write_func(at24, buf, off, count); 636 status = at24->write_func(at24, buf, off, count);
641 if (status < 0) { 637 if (status < 0) {
642 mutex_unlock(&at24->lock); 638 mutex_unlock(&at24->lock);
643 pm_runtime_put(&client->dev); 639 pm_runtime_put(dev);
644 return status; 640 return status;
645 } 641 }
646 buf += status; 642 buf += status;
@@ -650,7 +646,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
650 646
651 mutex_unlock(&at24->lock); 647 mutex_unlock(&at24->lock);
652 648
653 pm_runtime_put(&client->dev); 649 pm_runtime_put(dev);
654 650
655 return 0; 651 return 0;
656} 652}
@@ -880,7 +876,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
880 at24->nvmem_config.reg_read = at24_read; 876 at24->nvmem_config.reg_read = at24_read;
881 at24->nvmem_config.reg_write = at24_write; 877 at24->nvmem_config.reg_write = at24_write;
882 at24->nvmem_config.priv = at24; 878 at24->nvmem_config.priv = at24;
883 at24->nvmem_config.stride = 4; 879 at24->nvmem_config.stride = 1;
884 at24->nvmem_config.word_size = 1; 880 at24->nvmem_config.word_size = 1;
885 at24->nvmem_config.size = chip.byte_len; 881 at24->nvmem_config.size = chip.byte_len;
886 882
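Annotation: the at24 hunks anchor every runtime-PM call on one device (&at24->client[0]->dev) even though a transfer may be routed to a different i2c client, so each pm_runtime_get_sync() is balanced by a put on the same device. A reduced sketch of that bracket around an access, assuming the kernel runtime-PM API; the transfer itself is elided:

#include <linux/pm_runtime.h>

static int guarded_access(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* undo the usage count on failure */
		return ret;
	}

	/* ... perform the EEPROM transfer ... */

	pm_runtime_put(dev);			/* same device as the get above */
	return 0;
}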
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index eda38cbe8530..41f2a9f6851d 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -32,7 +32,7 @@
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
35#include <linux/pti.h> 35#include <linux/intel-pti.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/uaccess.h> 37#include <linux/uaccess.h>
38 38
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index f06cd91964ce..79a5b985ccf5 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -75,9 +75,11 @@ struct mmc_fixup {
75#define EXT_CSD_REV_ANY (-1u) 75#define EXT_CSD_REV_ANY (-1u)
76 76
77#define CID_MANFID_SANDISK 0x2 77#define CID_MANFID_SANDISK 0x2
78#define CID_MANFID_ATP 0x9
78#define CID_MANFID_TOSHIBA 0x11 79#define CID_MANFID_TOSHIBA 0x11
79#define CID_MANFID_MICRON 0x13 80#define CID_MANFID_MICRON 0x13
80#define CID_MANFID_SAMSUNG 0x15 81#define CID_MANFID_SAMSUNG 0x15
82#define CID_MANFID_APACER 0x27
81#define CID_MANFID_KINGSTON 0x70 83#define CID_MANFID_KINGSTON 0x70
82#define CID_MANFID_HYNIX 0x90 84#define CID_MANFID_HYNIX 0x90
83 85
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index d209fb466979..208a762b87ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1290,7 +1290,7 @@ out_err:
1290 1290
1291static void mmc_select_driver_type(struct mmc_card *card) 1291static void mmc_select_driver_type(struct mmc_card *card)
1292{ 1292{
1293 int card_drv_type, drive_strength, drv_type; 1293 int card_drv_type, drive_strength, drv_type = 0;
1294 int fixed_drv_type = card->host->fixed_drv_type; 1294 int fixed_drv_type = card->host->fixed_drv_type;
1295 1295
1296 card_drv_type = card->ext_csd.raw_driver_strength | 1296 card_drv_type = card->ext_csd.raw_driver_strength |
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index f664e9cbc9f8..75d317623852 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -53,6 +53,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = {
53 MMC_QUIRK_BLK_NO_CMD23), 53 MMC_QUIRK_BLK_NO_CMD23),
54 54
55 /* 55 /*
56 * Some SD cards lockup while using CMD23 multiblock transfers.
57 */
58 MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
59 MMC_QUIRK_BLK_NO_CMD23),
60 MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
61 MMC_QUIRK_BLK_NO_CMD23),
62
63 /*
56 * Some MMC cards need longer data read timeout than indicated in CSD. 64 * Some MMC cards need longer data read timeout than indicated in CSD.
57 */ 65 */
58 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, 66 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index a7801f6668a5..6315774d72b3 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
338 cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX; 338 cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
339 break; 339 break;
340 case PHY_INTERFACE_MODE_XGMII: 340 case PHY_INTERFACE_MODE_XGMII:
341 case PHY_INTERFACE_MODE_XAUI:
341 cmode = MV88E6XXX_PORT_STS_CMODE_XAUI; 342 cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
342 break; 343 break;
343 case PHY_INTERFACE_MODE_RXAUI: 344 case PHY_INTERFACE_MODE_RXAUI:
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 57e796870595..105fdb958cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -50,7 +50,7 @@
50#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U 50#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
51#define AQ_CFG_PCI_FUNC_PORTS 2U 51#define AQ_CFG_PCI_FUNC_PORTS 2U
52 52
53#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ) 53#define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ)
54#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) 54#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
55 55
56#define AQ_CFG_SKB_FRAGS_MAX 32U 56#define AQ_CFG_SKB_FRAGS_MAX 32U
@@ -80,6 +80,7 @@
80#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ 80#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
81 __stringify(NIC_MINOR_DRIVER_VERSION)"."\ 81 __stringify(NIC_MINOR_DRIVER_VERSION)"."\
82 __stringify(NIC_BUILD_DRIVER_VERSION)"."\ 82 __stringify(NIC_BUILD_DRIVER_VERSION)"."\
83 __stringify(NIC_REVISION_DRIVER_VERSION) 83 __stringify(NIC_REVISION_DRIVER_VERSION) \
84 AQ_CFG_DRV_VERSION_SUFFIX
84 85
85#endif /* AQ_CFG_H */ 86#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 70efb7467bf3..f2d8063a2cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
66 "OutUCast", 66 "OutUCast",
67 "OutMCast", 67 "OutMCast",
68 "OutBCast", 68 "OutBCast",
69 "InUCastOctects", 69 "InUCastOctets",
70 "OutUCastOctects", 70 "OutUCastOctets",
71 "InMCastOctects", 71 "InMCastOctets",
72 "OutMCastOctects", 72 "OutMCastOctets",
73 "InBCastOctects", 73 "InBCastOctets",
74 "OutBCastOctects", 74 "OutBCastOctets",
75 "InOctects", 75 "InOctets",
76 "OutOctects", 76 "OutOctets",
77 "InPacketsDma", 77 "InPacketsDma",
78 "OutPacketsDma", 78 "OutPacketsDma",
79 "InOctetsDma", 79 "InOctetsDma",
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 0207927dc8a6..b3825de6cdfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -46,6 +46,28 @@ struct aq_hw_link_status_s {
46 unsigned int mbps; 46 unsigned int mbps;
47}; 47};
48 48
49struct aq_stats_s {
50 u64 uprc;
51 u64 mprc;
52 u64 bprc;
53 u64 erpt;
54 u64 uptc;
55 u64 mptc;
56 u64 bptc;
57 u64 erpr;
58 u64 mbtc;
59 u64 bbtc;
60 u64 mbrc;
61 u64 bbrc;
62 u64 ubrc;
63 u64 ubtc;
64 u64 dpc;
65 u64 dma_pkt_rc;
66 u64 dma_pkt_tc;
67 u64 dma_oct_rc;
68 u64 dma_oct_tc;
69};
70
49#define AQ_HW_IRQ_INVALID 0U 71#define AQ_HW_IRQ_INVALID 0U
50#define AQ_HW_IRQ_LEGACY 1U 72#define AQ_HW_IRQ_LEGACY 1U
51#define AQ_HW_IRQ_MSI 2U 73#define AQ_HW_IRQ_MSI 2U
@@ -85,7 +107,9 @@ struct aq_hw_ops {
85 void (*destroy)(struct aq_hw_s *self); 107 void (*destroy)(struct aq_hw_s *self);
86 108
87 int (*get_hw_caps)(struct aq_hw_s *self, 109 int (*get_hw_caps)(struct aq_hw_s *self,
88 struct aq_hw_caps_s *aq_hw_caps); 110 struct aq_hw_caps_s *aq_hw_caps,
111 unsigned short device,
112 unsigned short subsystem_device);
89 113
90 int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, 114 int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
91 unsigned int frags); 115 unsigned int frags);
@@ -164,8 +188,7 @@ struct aq_hw_ops {
164 188
165 int (*hw_update_stats)(struct aq_hw_s *self); 189 int (*hw_update_stats)(struct aq_hw_s *self);
166 190
167 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, 191 struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
168 unsigned int *p_count);
169 192
170 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); 193 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
171 194
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 78dfb2ab78ce..75a894a9251c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -37,6 +37,8 @@ static unsigned int aq_itr_rx;
37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); 37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); 38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
39 39
40static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
41
40static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 42static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
41{ 43{
42 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 44 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -166,11 +168,8 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
166static void aq_nic_service_timer_cb(struct timer_list *t) 168static void aq_nic_service_timer_cb(struct timer_list *t)
167{ 169{
168 struct aq_nic_s *self = from_timer(self, t, service_timer); 170 struct aq_nic_s *self = from_timer(self, t, service_timer);
169 struct net_device *ndev = aq_nic_get_ndev(self); 171 int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
170 int err = 0; 172 int err = 0;
171 unsigned int i = 0U;
172 struct aq_ring_stats_rx_s stats_rx;
173 struct aq_ring_stats_tx_s stats_tx;
174 173
175 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) 174 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
176 goto err_exit; 175 goto err_exit;
@@ -182,23 +181,14 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
182 if (self->aq_hw_ops.hw_update_stats) 181 if (self->aq_hw_ops.hw_update_stats)
183 self->aq_hw_ops.hw_update_stats(self->aq_hw); 182 self->aq_hw_ops.hw_update_stats(self->aq_hw);
184 183
185 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 184 aq_nic_update_ndev_stats(self);
186 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
187 for (i = AQ_DIMOF(self->aq_vec); i--;) {
188 if (self->aq_vec[i])
189 aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
190 }
191 185
192 ndev->stats.rx_packets = stats_rx.packets; 186 /* If no link - use faster timer rate to detect link up asap */
193 ndev->stats.rx_bytes = stats_rx.bytes; 187 if (!netif_carrier_ok(self->ndev))
194 ndev->stats.rx_errors = stats_rx.errors; 188 ctimer = max(ctimer / 2, 1);
195 ndev->stats.tx_packets = stats_tx.packets;
196 ndev->stats.tx_bytes = stats_tx.bytes;
197 ndev->stats.tx_errors = stats_tx.errors;
198 189
199err_exit: 190err_exit:
200 mod_timer(&self->service_timer, 191 mod_timer(&self->service_timer, jiffies + ctimer);
201 jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
202} 192}
203 193
204static void aq_nic_polling_timer_cb(struct timer_list *t) 194static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void)
222 212
223struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 213struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
224 const struct ethtool_ops *et_ops, 214 const struct ethtool_ops *et_ops,
225 struct device *dev, 215 struct pci_dev *pdev,
226 struct aq_pci_func_s *aq_pci_func, 216 struct aq_pci_func_s *aq_pci_func,
227 unsigned int port, 217 unsigned int port,
228 const struct aq_hw_ops *aq_hw_ops) 218 const struct aq_hw_ops *aq_hw_ops)
@@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
242 ndev->netdev_ops = ndev_ops; 232 ndev->netdev_ops = ndev_ops;
243 ndev->ethtool_ops = et_ops; 233 ndev->ethtool_ops = et_ops;
244 234
245 SET_NETDEV_DEV(ndev, dev); 235 SET_NETDEV_DEV(ndev, &pdev->dev);
246 236
247 ndev->if_port = port; 237 ndev->if_port = port;
248 self->ndev = ndev; 238 self->ndev = ndev;
@@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
254 244
255 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, 245 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
256 &self->aq_hw_ops); 246 &self->aq_hw_ops);
257 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); 247 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
248 pdev->device, pdev->subsystem_device);
258 if (err < 0) 249 if (err < 0)
259 goto err_exit; 250 goto err_exit;
260 251
@@ -749,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
749 740
750void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) 741void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
751{ 742{
752 struct aq_vec_s *aq_vec = NULL;
753 unsigned int i = 0U; 743 unsigned int i = 0U;
754 unsigned int count = 0U; 744 unsigned int count = 0U;
755 int err = 0; 745 struct aq_vec_s *aq_vec = NULL;
746 struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
756 747
757 err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); 748 if (!stats)
758 if (err < 0)
759 goto err_exit; 749 goto err_exit;
760 750
761 data += count; 751 data[i] = stats->uprc + stats->mprc + stats->bprc;
752 data[++i] = stats->uprc;
753 data[++i] = stats->mprc;
754 data[++i] = stats->bprc;
755 data[++i] = stats->erpt;
756 data[++i] = stats->uptc + stats->mptc + stats->bptc;
757 data[++i] = stats->uptc;
758 data[++i] = stats->mptc;
759 data[++i] = stats->bptc;
760 data[++i] = stats->ubrc;
761 data[++i] = stats->ubtc;
762 data[++i] = stats->mbrc;
763 data[++i] = stats->mbtc;
764 data[++i] = stats->bbrc;
765 data[++i] = stats->bbtc;
766 data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
767 data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
768 data[++i] = stats->dma_pkt_rc;
769 data[++i] = stats->dma_pkt_tc;
770 data[++i] = stats->dma_oct_rc;
771 data[++i] = stats->dma_oct_tc;
772 data[++i] = stats->dpc;
773
774 i++;
775
776 data += i;
762 count = 0U; 777 count = 0U;
763 778
764 for (i = 0U, aq_vec = self->aq_vec[0]; 779 for (i = 0U, aq_vec = self->aq_vec[0];
@@ -768,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
768 } 783 }
769 784
770err_exit:; 785err_exit:;
771 (void)err; 786}
787
788static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
789{
790 struct net_device *ndev = self->ndev;
791 struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
792
793 ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
794 ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
795 ndev->stats.rx_errors = stats->erpr;
796 ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
797 ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
798 ndev->stats.tx_errors = stats->erpt;
799 ndev->stats.multicast = stats->mprc;
772} 800}
773 801
774void aq_nic_get_link_ksettings(struct aq_nic_s *self, 802void aq_nic_get_link_ksettings(struct aq_nic_s *self,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 4309983acdd6..3c9f8db03d5f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -71,7 +71,7 @@ struct aq_nic_cfg_s {
71 71
72struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 72struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
73 const struct ethtool_ops *et_ops, 73 const struct ethtool_ops *et_ops,
74 struct device *dev, 74 struct pci_dev *pdev,
75 struct aq_pci_func_s *aq_pci_func, 75 struct aq_pci_func_s *aq_pci_func,
76 unsigned int port, 76 unsigned int port,
77 const struct aq_hw_ops *aq_hw_ops); 77 const struct aq_hw_ops *aq_hw_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index cadaa646c89f..58c29d04b186 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
51 pci_set_drvdata(pdev, self); 51 pci_set_drvdata(pdev, self);
52 self->pdev = pdev; 52 self->pdev = pdev;
53 53
54 err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); 54 err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
55 pdev->subsystem_device);
55 if (err < 0) 56 if (err < 0)
56 goto err_exit; 57 goto err_exit;
57 58
@@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
59 60
60 for (port = 0; port < self->ports; ++port) { 61 for (port = 0; port < self->ports; ++port) {
61 struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, 62 struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
62 &pdev->dev, self, 63 pdev, self,
63 port, aq_hw_ops); 64 port, aq_hw_ops);
64 65
65 if (!aq_nic) { 66 if (!aq_nic) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 07b3c49a16a4..f18dce14c93c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -18,9 +18,20 @@
18#include "hw_atl_a0_internal.h" 18#include "hw_atl_a0_internal.h"
19 19
20static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, 20static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
21 struct aq_hw_caps_s *aq_hw_caps) 21 struct aq_hw_caps_s *aq_hw_caps,
22 unsigned short device,
23 unsigned short subsystem_device)
22{ 24{
23 memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); 25 memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
26
27 if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
28 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
29
30 if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
31 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
32 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
33 }
34
24 return 0; 35 return 0;
25} 36}
26 37
@@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
333 hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 344 hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
334 hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 345 hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
335 346
347 /* Reset link status and read out initial hardware counters */
348 self->aq_link_status.mbps = 0;
349 hw_atl_utils_update_stats(self);
350
336 err = aq_hw_err_from_flags(self); 351 err = aq_hw_err_from_flags(self);
337 if (err < 0) 352 if (err < 0)
338 goto err_exit; 353 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index ec68c20efcbd..e4a22ce7bf09 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -16,11 +16,23 @@
16#include "hw_atl_utils.h" 16#include "hw_atl_utils.h"
17#include "hw_atl_llh.h" 17#include "hw_atl_llh.h"
18#include "hw_atl_b0_internal.h" 18#include "hw_atl_b0_internal.h"
19#include "hw_atl_llh_internal.h"
19 20
20static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, 21static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
21 struct aq_hw_caps_s *aq_hw_caps) 22 struct aq_hw_caps_s *aq_hw_caps,
23 unsigned short device,
24 unsigned short subsystem_device)
22{ 25{
23 memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); 26 memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
27
28 if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
29 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
30
31 if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
32 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
33 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
34 }
35
24 return 0; 36 return 0;
25} 37}
26 38
@@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
357 }; 369 };
358 370
359 int err = 0; 371 int err = 0;
372 u32 val;
360 373
361 self->aq_nic_cfg = aq_nic_cfg; 374 self->aq_nic_cfg = aq_nic_cfg;
362 375
@@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
374 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 387 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
375 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 388 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
376 389
390 /* Force limit MRRS on RDM/TDM to 2K */
391 val = aq_hw_read_reg(self, pci_reg_control6_adr);
392 aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
393
394 /* TX DMA total request limit. B0 hardware is not capable to
395 * handle more than (8K-MRRS) incoming DMA data.
396 * Value 24 in 256byte units
397 */
398 aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
399
400 /* Reset link status and read out initial hardware counters */
401 self->aq_link_status.mbps = 0;
402 hw_atl_utils_update_stats(self);
403
377 err = aq_hw_err_from_flags(self); 404 err = aq_hw_err_from_flags(self);
378 if (err < 0) 405 if (err < 0)
379 goto err_exit; 406 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 5527fc0e5942..93450ec930e8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -2343,6 +2343,9 @@
2343#define tx_dma_desc_base_addrmsw_adr(descriptor) \ 2343#define tx_dma_desc_base_addrmsw_adr(descriptor) \
2344 (0x00007c04u + (descriptor) * 0x40) 2344 (0x00007c04u + (descriptor) * 0x40)
2345 2345
2346/* tx dma total request limit */
2347#define tx_dma_total_req_limit_adr 0x00007b20u
2348
2346/* tx interrupt moderation control register definitions 2349/* tx interrupt moderation control register definitions
2347 * Preprocessor definitions for TX Interrupt Moderation Control Register 2350 * Preprocessor definitions for TX Interrupt Moderation Control Register
2348 * Base Address: 0x00008980 2351 * Base Address: 0x00008980
@@ -2369,6 +2372,9 @@
2369/* default value of bitfield reg_res_dsbl */ 2372/* default value of bitfield reg_res_dsbl */
2370#define pci_reg_res_dsbl_default 0x1 2373#define pci_reg_res_dsbl_default 0x1
2371 2374
2375/* PCI core control register */
2376#define pci_reg_control6_adr 0x1014u
2377
2372/* global microprocessor scratch pad definitions */ 2378/* global microprocessor scratch pad definitions */
2373#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) 2379#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
2374 2380
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 1fe016fc4bc7..f2ce12ed4218 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
503 struct hw_atl_s *hw_self = PHAL_ATLANTIC; 503 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
504 struct hw_aq_atl_utils_mbox mbox; 504 struct hw_aq_atl_utils_mbox mbox;
505 505
506 if (!self->aq_link_status.mbps)
507 return 0;
508
509 hw_atl_utils_mpi_read_stats(self, &mbox); 506 hw_atl_utils_mpi_read_stats(self, &mbox);
510 507
511#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ 508#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
512 mbox.stats._N_ - hw_self->last_stats._N_) 509 mbox.stats._N_ - hw_self->last_stats._N_)
513 510 if (self->aq_link_status.mbps) {
514 AQ_SDELTA(uprc); 511 AQ_SDELTA(uprc);
515 AQ_SDELTA(mprc); 512 AQ_SDELTA(mprc);
516 AQ_SDELTA(bprc); 513 AQ_SDELTA(bprc);
517 AQ_SDELTA(erpt); 514 AQ_SDELTA(erpt);
518 515
519 AQ_SDELTA(uptc); 516 AQ_SDELTA(uptc);
520 AQ_SDELTA(mptc); 517 AQ_SDELTA(mptc);
521 AQ_SDELTA(bptc); 518 AQ_SDELTA(bptc);
522 AQ_SDELTA(erpr); 519 AQ_SDELTA(erpr);
523 520
524 AQ_SDELTA(ubrc); 521 AQ_SDELTA(ubrc);
525 AQ_SDELTA(ubtc); 522 AQ_SDELTA(ubtc);
526 AQ_SDELTA(mbrc); 523 AQ_SDELTA(mbrc);
527 AQ_SDELTA(mbtc); 524 AQ_SDELTA(mbtc);
528 AQ_SDELTA(bbrc); 525 AQ_SDELTA(bbrc);
529 AQ_SDELTA(bbtc); 526 AQ_SDELTA(bbtc);
530 AQ_SDELTA(dpc); 527 AQ_SDELTA(dpc);
531 528 }
532#undef AQ_SDELTA 529#undef AQ_SDELTA
530 hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
531 hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
532 hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
533 hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
533 534
534 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); 535 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
535 536
536 return 0; 537 return 0;
537} 538}
538 539
539int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 540struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
540 u64 *data, unsigned int *p_count)
541{ 541{
542 struct hw_atl_s *hw_self = PHAL_ATLANTIC; 542 return &PHAL_ATLANTIC->curr_stats;
543 struct hw_atl_stats_s *stats = &hw_self->curr_stats;
544 int i = 0;
545
546 data[i] = stats->uprc + stats->mprc + stats->bprc;
547 data[++i] = stats->uprc;
548 data[++i] = stats->mprc;
549 data[++i] = stats->bprc;
550 data[++i] = stats->erpt;
551 data[++i] = stats->uptc + stats->mptc + stats->bptc;
552 data[++i] = stats->uptc;
553 data[++i] = stats->mptc;
554 data[++i] = stats->bptc;
555 data[++i] = stats->ubrc;
556 data[++i] = stats->ubtc;
557 data[++i] = stats->mbrc;
558 data[++i] = stats->mbtc;
559 data[++i] = stats->bbrc;
560 data[++i] = stats->bbtc;
561 data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
562 data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
563 data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
564 data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
565 data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
566 data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
567 data[++i] = stats->dpc;
568
569 if (p_count)
570 *p_count = ++i;
571
572 return 0;
573} 543}
574 544
575static const u32 hw_atl_utils_hw_mac_regs[] = { 545static const u32 hw_atl_utils_hw_mac_regs[] = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index c99cc690e425..21aeca6908d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox {
129struct __packed hw_atl_s { 129struct __packed hw_atl_s {
130 struct aq_hw_s base; 130 struct aq_hw_s base;
131 struct hw_atl_stats_s last_stats; 131 struct hw_atl_stats_s last_stats;
132 struct hw_atl_stats_s curr_stats; 132 struct aq_stats_s curr_stats;
133 u64 speed; 133 u64 speed;
134 unsigned int chip_features; 134 unsigned int chip_features;
135 u32 fw_ver_actual; 135 u32 fw_ver_actual;
@@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
207 207
208int hw_atl_utils_update_stats(struct aq_hw_s *self); 208int hw_atl_utils_update_stats(struct aq_hw_s *self);
209 209
210int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 210struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
211 u64 *data,
212 unsigned int *p_count);
213 211
214#endif /* HW_ATL_UTILS_H */ 212#endif /* HW_ATL_UTILS_H */
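
The hw_atl_utils_get_hw_stats() change replaces a fill-this-array-and-report-a-count interface with a function that simply returns a pointer to the counters the driver already maintains, leaving the per-field layout to the caller. A small userspace sketch contrasting the two calling conventions; the struct and field names below are invented for illustration and are not the real aq_stats_s layout:

#include <stdio.h>

struct stats {			/* stand-in for the driver's stats struct */
	unsigned long rx_pkts;
	unsigned long tx_pkts;
};

static struct stats hw_stats = { 12, 34 };

/* Old style: copy values into a caller-supplied array and report the count. */
static int get_stats_copy(unsigned long *data, unsigned int *count)
{
	unsigned int i = 0;

	data[i++] = hw_stats.rx_pkts;
	data[i++] = hw_stats.tx_pkts;
	*count = i;
	return 0;
}

/* New style: hand back a pointer to the internally maintained counters. */
static const struct stats *get_stats_ptr(void)
{
	return &hw_stats;
}

int main(void)
{
	unsigned long data[2];
	unsigned int n;

	get_stats_copy(data, &n);
	printf("copied %u counters, rx=%lu\n", n, data[0]);
	printf("direct rx=%lu tx=%lu\n", get_stats_ptr()->rx_pkts,
	       get_stats_ptr()->tx_pkts);
	return 0;
}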
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 0de858d215c2..9009f2651e70 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -11,8 +11,10 @@
11#define VER_H 11#define VER_H
12 12
13#define NIC_MAJOR_DRIVER_VERSION 1 13#define NIC_MAJOR_DRIVER_VERSION 1
14#define NIC_MINOR_DRIVER_VERSION 5 14#define NIC_MINOR_DRIVER_VERSION 6
15#define NIC_BUILD_DRIVER_VERSION 345 15#define NIC_BUILD_DRIVER_VERSION 13
16#define NIC_REVISION_DRIVER_VERSION 0 16#define NIC_REVISION_DRIVER_VERSION 0
17 17
18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
19
18#endif /* VER_H */ 20#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index c6163874e4e7..16f9bee992fe 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
199 199
 200	/* RMII interface always needs a rate of 50MHz */ 200	/* RMII interface always needs a rate of 50MHz */
201 err = clk_set_rate(priv->refclk, 50000000); 201 err = clk_set_rate(priv->refclk, 50000000);
202 if (err) 202 if (err) {
203 dev_err(dev, 203 dev_err(dev,
204 "failed to change reference clock rate (%d)\n", err); 204 "failed to change reference clock rate (%d)\n", err);
205 goto out_regulator_disable;
206 }
205 207
206 if (priv->soc_data->need_div_macclk) { 208 if (priv->soc_data->need_div_macclk) {
207 priv->macclk = devm_clk_get(dev, "macclk"); 209 priv->macclk = devm_clk_get(dev, "macclk");
@@ -230,12 +232,14 @@ static int emac_rockchip_probe(struct platform_device *pdev)
230 err = arc_emac_probe(ndev, interface); 232 err = arc_emac_probe(ndev, interface);
231 if (err) { 233 if (err) {
232 dev_err(dev, "failed to probe arc emac (%d)\n", err); 234 dev_err(dev, "failed to probe arc emac (%d)\n", err);
233 goto out_regulator_disable; 235 goto out_clk_disable_macclk;
234 } 236 }
235 237
236 return 0; 238 return 0;
239
237out_clk_disable_macclk: 240out_clk_disable_macclk:
238 clk_disable_unprepare(priv->macclk); 241 if (priv->soc_data->need_div_macclk)
242 clk_disable_unprepare(priv->macclk);
239out_regulator_disable: 243out_regulator_disable:
240 if (priv->regulator) 244 if (priv->regulator)
241 regulator_disable(priv->regulator); 245 regulator_disable(priv->regulator);
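
The emac_rockchip probe fix extends the usual goto-unwind ladder so a clk_set_rate() failure also releases the regulator, and so macclk is only disabled on SoCs that actually have one. A compact standalone sketch of that error-unwinding pattern, with placeholder resource names standing in for the real clock and regulator calls:

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe(int fail_step)
{
	int err;

	err = acquire("regulator");
	if (err)
		return err;

	err = fail_step == 1 ? -1 : acquire("refclk");
	if (err)
		goto out_regulator;

	err = fail_step == 2 ? -1 : acquire("macclk");
	if (err)
		goto out_regulator;

	err = fail_step == 3 ? -1 : acquire("netdev");
	if (err)
		goto out_macclk;

	return 0;

out_macclk:
	release("macclk");
out_regulator:
	release("regulator");
	return err;
}

int main(void)
{
	printf("probe(3) = %d\n", probe(3));	/* exercises the unwind path */
	return 0;
}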
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6e423f098a60..31efc47c847e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)
4081 if (hw->ports > 1) { 4081 if (hw->ports > 1) {
4082 skge_write32(hw, B0_IMSK, 0); 4082 skge_write32(hw, B0_IMSK, 0);
4083 skge_read32(hw, B0_IMSK); 4083 skge_read32(hw, B0_IMSK);
4084 free_irq(pdev->irq, hw);
4085 } 4084 }
4086 spin_unlock_irq(&hw->hw_lock); 4085 spin_unlock_irq(&hw->hw_lock);
4087 4086
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index e0eb695318e6..1fa4849a6f56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
188 struct net_device *dev = mdev->pndev[port]; 188 struct net_device *dev = mdev->pndev[port];
189 struct mlx4_en_priv *priv = netdev_priv(dev); 189 struct mlx4_en_priv *priv = netdev_priv(dev);
190 struct net_device_stats *stats = &dev->stats; 190 struct net_device_stats *stats = &dev->stats;
191 struct mlx4_cmd_mailbox *mailbox; 191 struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
192 u64 in_mod = reset << 8 | port; 192 u64 in_mod = reset << 8 | port;
193 int err; 193 int err;
194 int i, counter_index; 194 int i, counter_index;
@@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
198 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 198 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
199 if (IS_ERR(mailbox)) 199 if (IS_ERR(mailbox))
200 return PTR_ERR(mailbox); 200 return PTR_ERR(mailbox);
201
202 mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
203 if (IS_ERR(mailbox_priority)) {
204 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
205 return PTR_ERR(mailbox_priority);
206 }
207
201 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, 208 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
202 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, 209 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
203 MLX4_CMD_NATIVE); 210 MLX4_CMD_NATIVE);
@@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
206 213
207 mlx4_en_stats = mailbox->buf; 214 mlx4_en_stats = mailbox->buf;
208 215
216 memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
217 counter_index = mlx4_get_default_counter_index(mdev->dev, port);
218 err = mlx4_get_counter_stats(mdev->dev, counter_index,
219 &tmp_counter_stats, reset);
220
221 /* 0xffs indicates invalid value */
222 memset(mailbox_priority->buf, 0xff,
223 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
224
225 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
226 memset(mailbox_priority->buf, 0,
227 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
228 err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
229 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
230 0, MLX4_CMD_DUMP_ETH_STATS,
231 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
232 if (err)
233 goto out;
234 }
235
236 flowstats = mailbox_priority->buf;
237
209 spin_lock_bh(&priv->stats_lock); 238 spin_lock_bh(&priv->stats_lock);
210 239
211 mlx4_en_fold_software_stats(dev); 240 mlx4_en_fold_software_stats(dev);
@@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
345 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); 374 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
346 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); 375 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
347 376
348 spin_unlock_bh(&priv->stats_lock);
349
350 memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
351 counter_index = mlx4_get_default_counter_index(mdev->dev, port);
352 err = mlx4_get_counter_stats(mdev->dev, counter_index,
353 &tmp_counter_stats, reset);
354
355 /* 0xffs indicates invalid value */
356 memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
357
358 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
359 memset(mailbox->buf, 0,
360 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
361 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
362 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
363 0, MLX4_CMD_DUMP_ETH_STATS,
364 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
365 if (err)
366 goto out;
367 }
368
369 flowstats = mailbox->buf;
370
371 spin_lock_bh(&priv->stats_lock);
372
373 if (tmp_counter_stats.counter_mode == 0) { 377 if (tmp_counter_stats.counter_mode == 0) {
374 priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes); 378 priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
375 priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes); 379 priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
@@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
410 414
411out: 415out:
412 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 416 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
417 mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
413 return err; 418 return err;
414} 419}
415 420
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 88699b181946..946d9db7c8c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
185 if (priv->mdev->dev->caps.flags & 185 if (priv->mdev->dev->caps.flags &
186 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { 186 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
187 buf[3] = mlx4_en_test_registers(priv); 187 buf[3] = mlx4_en_test_registers(priv);
188 if (priv->port_up) 188 if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
189 buf[4] = mlx4_en_test_loopback(priv); 189 buf[4] = mlx4_en_test_loopback(priv);
190 } 190 }
191 191
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1856e279a7e0..2b72677eccd4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -153,6 +153,9 @@
153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) 153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) 154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
155#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) 155#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
156#define PREAMBLE_LEN 8
157#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
158 ETH_HLEN + PREAMBLE_LEN)
156 159
157#define MLX4_EN_MIN_MTU 46 160#define MLX4_EN_MIN_MTU 46
158/* VLAN_HLEN is added twice, to support skb vlan tagged with multiple 161
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 04304dd894c6..606a0e0beeae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
611 MLX4_MAX_PORTS; 611 MLX4_MAX_PORTS;
612 else 612 else
613 res_alloc->guaranteed[t] = 0; 613 res_alloc->guaranteed[t] = 0;
614 res_alloc->res_free -= res_alloc->guaranteed[t];
615 break; 614 break;
616 default: 615 default:
617 break; 616 break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 2d0897b7d860..9bd8d28de152 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4300,6 +4300,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4300 4300
4301static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4301static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4302{ 4302{
4303 u16 vid = 1;
4303 int err; 4304 int err;
4304 4305
4305 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4306 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
@@ -4312,8 +4313,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4312 true, false); 4313 true, false);
4313 if (err) 4314 if (err)
4314 goto err_port_vlan_set; 4315 goto err_port_vlan_set;
4316
4317 for (; vid <= VLAN_N_VID - 1; vid++) {
4318 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4319 vid, false);
4320 if (err)
4321 goto err_vid_learning_set;
4322 }
4323
4315 return 0; 4324 return 0;
4316 4325
4326err_vid_learning_set:
4327 for (vid--; vid >= 1; vid--)
4328 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4317err_port_vlan_set: 4329err_port_vlan_set:
4318 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4330 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4319err_port_stp_set: 4331err_port_stp_set:
@@ -4323,6 +4335,12 @@ err_port_stp_set:
4323 4335
4324static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4336static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4325{ 4337{
4338 u16 vid;
4339
4340 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4341 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4342 vid, true);
4343
4326 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4344 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4327 false, false); 4345 false, false);
4328 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4346 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
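
The mlxsw change walks every VLAN ID, disables learning on each, and on failure re-enables learning only on the VIDs it already touched before returning the error. A standalone sketch of that apply-with-rollback loop; the failing VID and the array-backed "hardware" are simulated for illustration:

#include <stdbool.h>
#include <stdio.h>

#define N_VID 4096

static bool learning[N_VID];

/* Pretend this can fail for one particular VID. */
static int set_learning(int vid, bool enable)
{
	if (vid == 1000 && !enable)
		return -1;	/* simulated hardware error */
	learning[vid] = enable;
	return 0;
}

static int disable_learning_all(void)
{
	int vid, err;

	for (vid = 1; vid <= N_VID - 1; vid++) {
		err = set_learning(vid, false);
		if (err)
			goto err_rollback;
	}
	return 0;

err_rollback:
	/* Re-enable only the VIDs we already touched. */
	for (vid--; vid >= 1; vid--)
		set_learning(vid, true);
	return err;
}

int main(void)
{
	printf("disable_learning_all() = %d\n", disable_learning_all());
	return 0;
}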
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 18461fcb9815..53dbf1e163a8 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -47,6 +47,7 @@
47#define MDIO_CLK_25_28 7 47#define MDIO_CLK_25_28 7
48 48
49#define MDIO_WAIT_TIMES 1000 49#define MDIO_WAIT_TIMES 1000
50#define MDIO_STATUS_DELAY_TIME 1
50 51
51static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) 52static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
52{ 53{
@@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
65 66
66 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 67 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
67 !(reg & (MDIO_START | MDIO_BUSY)), 68 !(reg & (MDIO_START | MDIO_BUSY)),
68 100, MDIO_WAIT_TIMES * 100)) 69 MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
69 return -EIO; 70 return -EIO;
70 71
71 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; 72 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
@@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
88 writel(reg, adpt->base + EMAC_MDIO_CTRL); 89 writel(reg, adpt->base + EMAC_MDIO_CTRL);
89 90
90 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 91 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
91 !(reg & (MDIO_START | MDIO_BUSY)), 100, 92 !(reg & (MDIO_START | MDIO_BUSY)),
92 MDIO_WAIT_TIMES * 100)) 93 MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
93 return -EIO; 94 return -EIO;
94 95
95 return 0; 96 return 0;
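
Both MDIO accessors now poll with a 1-microsecond delay between reads instead of 100, while keeping the same overall budget. A userspace analogue of such a poll-with-timeout helper; usleep() stands in for the kernel's delay primitive and the condition callback is a stub:

#include <stdio.h>
#include <unistd.h>

/* Check a condition every delay_us microseconds, give up after timeout_us. */
static int poll_until(int (*done)(void), unsigned int delay_us,
		      unsigned int timeout_us)
{
	unsigned int waited = 0;

	for (;;) {
		if (done())
			return 0;
		if (waited >= timeout_us)
			return -1;	/* timed out */
		usleep(delay_us);
		waited += delay_us;
	}
}

static int counter;
static int ready(void) { return ++counter > 3; }

int main(void)
{
	/* 1 us between polls, up to 1000 * 100 us total, as in the patch */
	printf("poll_until() = %d\n", poll_until(ready, 1, 1000 * 100));
	return 0;
}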
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 2b962d349f5f..009780df664b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
2308 struct ravb_private *priv = netdev_priv(ndev); 2308 struct ravb_private *priv = netdev_priv(ndev);
2309 int ret = 0; 2309 int ret = 0;
2310 2310
2311 if (priv->wol_enabled) { 2311 /* If WoL is enabled set reset mode to rearm the WoL logic */
2312 /* Reduce the usecount of the clock to zero and then 2312 if (priv->wol_enabled)
2313 * restore it to its original value. This is done to force
2314 * the clock to be re-enabled which is a workaround
2315 * for renesas-cpg-mssr driver which do not enable clocks
2316 * when resuming from PSCI suspend/resume.
2317 *
2318 * Without this workaround the driver fails to communicate
2319 * with the hardware if WoL was enabled when the system
2320 * entered PSCI suspend. This is due to that if WoL is enabled
2321 * we explicitly keep the clock from being turned off when
2322 * suspending, but in PSCI sleep power is cut so the clock
2323 * is disabled anyhow, the clock driver is not aware of this
2324 * so the clock is not turned back on when resuming.
2325 *
2326 * TODO: once the renesas-cpg-mssr suspend/resume is working
2327 * this clock dance should be removed.
2328 */
2329 clk_disable(priv->clk);
2330 clk_disable(priv->clk);
2331 clk_enable(priv->clk);
2332 clk_enable(priv->clk);
2333
2334 /* Set reset mode to rearm the WoL logic */
2335 ravb_write(ndev, CCC_OPC_RESET, CCC); 2313 ravb_write(ndev, CCC_OPC_RESET, CCC);
2336 }
2337 2314
 2338	/* All registers have been reset to default values. 2315
 2339	 * Restore all registers which were set up at probe time and 2316
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index db72d13cebb9..75323000c364 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1892,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)
1892 return PTR_ERR(phydev); 1892 return PTR_ERR(phydev);
1893 } 1893 }
1894 1894
1895 /* mask with MAC supported features */
1896 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
1897 int err = phy_set_max_speed(phydev, SPEED_100);
1898 if (err) {
1899 netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
1900 phy_disconnect(phydev);
1901 return err;
1902 }
1903 }
1904
1895 phy_attached_info(phydev); 1905 phy_attached_info(phydev);
1896 1906
1897 return 0; 1907 return 0;
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 8483f03d5a41..1ab97d99b9ba 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)
1379 rrpriv->info_dma); 1379 rrpriv->info_dma);
1380 rrpriv->info = NULL; 1380 rrpriv->info = NULL;
1381 1381
1382 free_irq(pdev->irq, dev);
1383 spin_unlock_irqrestore(&rrpriv->lock, flags); 1382 spin_unlock_irqrestore(&rrpriv->lock, flags);
1383 free_irq(pdev->irq, dev);
1384 1384
1385 return 0; 1385 return 0;
1386} 1386}
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5f93e6add563..e911e4990b20 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev)
239{ 239{
240 int value; 240 int value;
241 241
242 mutex_lock(&phydev->lock);
243
244 value = phy_read(phydev, MII_BMCR); 242 value = phy_read(phydev, MII_BMCR);
245 value &= ~(BMCR_PDOWN | BMCR_ISOLATE); 243 value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
246 phy_write(phydev, MII_BMCR, value); 244 phy_write(phydev, MII_BMCR, value);
247 245
248 mutex_unlock(&phydev->lock);
249
250 return 0; 246 return 0;
251} 247}
252 248
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4d02b27df044..b5a8f750e433 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
637 if (err < 0) 637 if (err < 0)
638 goto error; 638 goto error;
639 639
640 /* Do not touch the fiber page if we're in copper->sgmii mode */
641 if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
642 return 0;
643
640 /* Then the fiber link */ 644 /* Then the fiber link */
641 err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); 645 err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
642 if (err < 0) 646 if (err < 0)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2df7b62c1a36..54d00a1d2bef 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -270,6 +270,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
270 270
271 if (addr == mdiodev->addr) { 271 if (addr == mdiodev->addr) {
272 dev->of_node = child; 272 dev->of_node = child;
273 dev->fwnode = of_fwnode_handle(child);
273 return; 274 return;
274 } 275 }
275 } 276 }
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 1ea69b7585d9..842eb871a6e3 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -22,6 +22,7 @@
22#include <linux/ethtool.h> 22#include <linux/ethtool.h>
23#include <linux/phy.h> 23#include <linux/phy.h>
24#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/bitfield.h>
25 26
26static int meson_gxl_config_init(struct phy_device *phydev) 27static int meson_gxl_config_init(struct phy_device *phydev)
27{ 28{
@@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev)
50 return 0; 51 return 0;
51} 52}
52 53
 54/* This function is provided to cope with possible failures of this PHY
 55 * during the aneg process. When aneg fails, the PHY reports that aneg is
 56 * done but the value found in MII_LPA is wrong:
 57 * - Early failures: MII_LPA is just 0x0001. If MII_EXPANSION reports that
 58 *   the link partner (LP) supports aneg but the LP never acked our base
 59 *   code word, it is likely that we never sent it to begin with.
 60 * - Late failures: MII_LPA is filled with a value which seems to make sense
 61 *   but is not actually what the LP is advertising. It seems that we
 62 *   can detect this using a magic bit in the WOL bank (reg 12 - bit 12).
 63 *   If this particular bit is not set when aneg is reported as done,
 64 *   it means MII_LPA is likely to be wrong.
 65 *
 66 * In both cases, forcing a restart of the aneg process solves the problem.
 67 * When this failure happens, the first retry is usually successful but,
 68 * in some cases, it may take up to 6 retries to get a decent result.
 69 */
70static int meson_gxl_read_status(struct phy_device *phydev)
71{
72 int ret, wol, lpa, exp;
73
74 if (phydev->autoneg == AUTONEG_ENABLE) {
75 ret = genphy_aneg_done(phydev);
76 if (ret < 0)
77 return ret;
78 else if (!ret)
79 goto read_status_continue;
80
81 /* Need to access WOL bank, make sure the access is open */
82 ret = phy_write(phydev, 0x14, 0x0000);
83 if (ret)
84 return ret;
85 ret = phy_write(phydev, 0x14, 0x0400);
86 if (ret)
87 return ret;
88 ret = phy_write(phydev, 0x14, 0x0000);
89 if (ret)
90 return ret;
91 ret = phy_write(phydev, 0x14, 0x0400);
92 if (ret)
93 return ret;
94
95 /* Request LPI_STATUS WOL register */
96 ret = phy_write(phydev, 0x14, 0x8D80);
97 if (ret)
98 return ret;
99
100 /* Read LPI_STATUS value */
101 wol = phy_read(phydev, 0x15);
102 if (wol < 0)
103 return wol;
104
105 lpa = phy_read(phydev, MII_LPA);
106 if (lpa < 0)
107 return lpa;
108
109 exp = phy_read(phydev, MII_EXPANSION);
110 if (exp < 0)
111 return exp;
112
113 if (!(wol & BIT(12)) ||
114 ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
115 /* Looks like aneg failed after all */
116 phydev_dbg(phydev, "LPA corruption - aneg restart\n");
117 return genphy_restart_aneg(phydev);
118 }
119 }
120
121read_status_continue:
122 return genphy_read_status(phydev);
123}
124
53static struct phy_driver meson_gxl_phy[] = { 125static struct phy_driver meson_gxl_phy[] = {
54 { 126 {
55 .phy_id = 0x01814400, 127 .phy_id = 0x01814400,
@@ -60,7 +132,7 @@ static struct phy_driver meson_gxl_phy[] = {
60 .config_init = meson_gxl_config_init, 132 .config_init = meson_gxl_config_init,
61 .config_aneg = genphy_config_aneg, 133 .config_aneg = genphy_config_aneg,
62 .aneg_done = genphy_aneg_done, 134 .aneg_done = genphy_aneg_done,
63 .read_status = genphy_read_status, 135 .read_status = meson_gxl_read_status,
64 .suspend = genphy_suspend, 136 .suspend = genphy_suspend,
65 .resume = genphy_resume, 137 .resume = genphy_resume,
66 }, 138 },
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 2b1e67bc1e73..ed10d1fc8f59 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -828,7 +828,6 @@ EXPORT_SYMBOL(phy_stop);
828 */ 828 */
829void phy_start(struct phy_device *phydev) 829void phy_start(struct phy_device *phydev)
830{ 830{
831 bool do_resume = false;
832 int err = 0; 831 int err = 0;
833 832
834 mutex_lock(&phydev->lock); 833 mutex_lock(&phydev->lock);
@@ -841,6 +840,9 @@ void phy_start(struct phy_device *phydev)
841 phydev->state = PHY_UP; 840 phydev->state = PHY_UP;
842 break; 841 break;
843 case PHY_HALTED: 842 case PHY_HALTED:
843 /* if phy was suspended, bring the physical link up again */
844 phy_resume(phydev);
845
844 /* make sure interrupts are re-enabled for the PHY */ 846 /* make sure interrupts are re-enabled for the PHY */
845 if (phydev->irq != PHY_POLL) { 847 if (phydev->irq != PHY_POLL) {
846 err = phy_enable_interrupts(phydev); 848 err = phy_enable_interrupts(phydev);
@@ -849,17 +851,12 @@ void phy_start(struct phy_device *phydev)
849 } 851 }
850 852
851 phydev->state = PHY_RESUMING; 853 phydev->state = PHY_RESUMING;
852 do_resume = true;
853 break; 854 break;
854 default: 855 default:
855 break; 856 break;
856 } 857 }
857 mutex_unlock(&phydev->lock); 858 mutex_unlock(&phydev->lock);
858 859
859 /* if phy was suspended, bring the physical link up again */
860 if (do_resume)
861 phy_resume(phydev);
862
863 phy_trigger_machine(phydev, true); 860 phy_trigger_machine(phydev, true);
864} 861}
865EXPORT_SYMBOL(phy_start); 862EXPORT_SYMBOL(phy_start);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 67f25ac29025..b15b31ca2618 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)
135 if (!mdio_bus_phy_may_suspend(phydev)) 135 if (!mdio_bus_phy_may_suspend(phydev))
136 goto no_resume; 136 goto no_resume;
137 137
138 mutex_lock(&phydev->lock);
138 ret = phy_resume(phydev); 139 ret = phy_resume(phydev);
140 mutex_unlock(&phydev->lock);
139 if (ret < 0) 141 if (ret < 0)
140 return ret; 142 return ret;
141 143
@@ -1026,7 +1028,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
1026 if (err) 1028 if (err)
1027 goto error; 1029 goto error;
1028 1030
1031 mutex_lock(&phydev->lock);
1029 phy_resume(phydev); 1032 phy_resume(phydev);
1033 mutex_unlock(&phydev->lock);
1030 phy_led_triggers_register(phydev); 1034 phy_led_triggers_register(phydev);
1031 1035
1032 return err; 1036 return err;
@@ -1157,6 +1161,8 @@ int phy_resume(struct phy_device *phydev)
1157 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); 1161 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
1158 int ret = 0; 1162 int ret = 0;
1159 1163
1164 WARN_ON(!mutex_is_locked(&phydev->lock));
1165
1160 if (phydev->drv && phydrv->resume) 1166 if (phydev->drv && phydrv->resume)
1161 ret = phydrv->resume(phydev); 1167 ret = phydrv->resume(phydev);
1162 1168
@@ -1639,13 +1645,9 @@ int genphy_resume(struct phy_device *phydev)
1639{ 1645{
1640 int value; 1646 int value;
1641 1647
1642 mutex_lock(&phydev->lock);
1643
1644 value = phy_read(phydev, MII_BMCR); 1648 value = phy_read(phydev, MII_BMCR);
1645 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); 1649 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
1646 1650
1647 mutex_unlock(&phydev->lock);
1648
1649 return 0; 1651 return 0;
1650} 1652}
1651EXPORT_SYMBOL(genphy_resume); 1653EXPORT_SYMBOL(genphy_resume);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 304ec6555cd8..3000ddd1c7e2 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1204,12 +1204,14 @@ static const struct usb_device_id products[] = {
1204 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1204 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
1205 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ 1205 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1206 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ 1206 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
1207 {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
1207 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1208 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1208 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1209 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1209 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1210 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
1210 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 1211 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
1211 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 1212 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
1212 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ 1213 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
1214 {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
1213 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 1215 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1214 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ 1216 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
1215 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ 1217 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 98258583abb0..3481e69738b5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -81,6 +81,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
81 * can be looked up later */ 81 * can be looked up later */
82 of_node_get(child); 82 of_node_get(child);
83 phy->mdio.dev.of_node = child; 83 phy->mdio.dev.of_node = child;
84 phy->mdio.dev.fwnode = of_fwnode_handle(child);
84 85
85 /* All data is now stored in the phy struct; 86 /* All data is now stored in the phy struct;
86 * register it */ 87 * register it */
@@ -111,6 +112,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
111 */ 112 */
112 of_node_get(child); 113 of_node_get(child);
113 mdiodev->dev.of_node = child; 114 mdiodev->dev.of_node = child;
115 mdiodev->dev.fwnode = of_fwnode_handle(child);
114 116
115 /* All data is now stored in the mdiodev struct; register it. */ 117 /* All data is now stored in the mdiodev struct; register it. */
116 rc = mdio_device_register(mdiodev); 118 rc = mdio_device_register(mdiodev);
@@ -206,6 +208,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
206 mdio->phy_mask = ~0; 208 mdio->phy_mask = ~0;
207 209
208 mdio->dev.of_node = np; 210 mdio->dev.of_node = np;
211 mdio->dev.fwnode = of_fwnode_handle(np);
209 212
210 /* Get bus level PHY reset GPIO details */ 213 /* Get bus level PHY reset GPIO details */
211 mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; 214 mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 12796eccb2be..52ab3cb0a0bf 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1128,12 +1128,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
1128 err = rcar_pcie_get_resources(pcie); 1128 err = rcar_pcie_get_resources(pcie);
1129 if (err < 0) { 1129 if (err < 0) {
1130 dev_err(dev, "failed to request resources: %d\n", err); 1130 dev_err(dev, "failed to request resources: %d\n", err);
1131 goto err_free_bridge; 1131 goto err_free_resource_list;
1132 } 1132 }
1133 1133
1134 err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); 1134 err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
1135 if (err) 1135 if (err)
1136 goto err_free_bridge; 1136 goto err_free_resource_list;
1137 1137
1138 pm_runtime_enable(dev); 1138 pm_runtime_enable(dev);
1139 err = pm_runtime_get_sync(dev); 1139 err = pm_runtime_get_sync(dev);
@@ -1176,9 +1176,9 @@ err_pm_put:
1176err_pm_disable: 1176err_pm_disable:
1177 pm_runtime_disable(dev); 1177 pm_runtime_disable(dev);
1178 1178
1179err_free_bridge: 1179err_free_resource_list:
1180 pci_free_host_bridge(bridge);
1181 pci_free_resource_list(&pcie->resources); 1180 pci_free_resource_list(&pcie->resources);
1181 pci_free_host_bridge(bridge);
1182 1182
1183 return err; 1183 return err;
1184} 1184}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 7f47bb72bf30..945099d49f8f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -999,7 +999,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
999 * the subsequent "thaw" callbacks for the device. 999 * the subsequent "thaw" callbacks for the device.
1000 */ 1000 */
1001 if (dev_pm_smart_suspend_and_suspended(dev)) { 1001 if (dev_pm_smart_suspend_and_suspended(dev)) {
1002 dev->power.direct_complete = true; 1002 dev_pm_skip_next_resume_phases(dev);
1003 return 0; 1003 return 0;
1004 } 1004 }
1005 1005
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index f3796164329e..d4aeac3477f5 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
118 return; 118 return;
119 } 119 }
120 input_report_key(data->idev, KEY_RFKILL, 1); 120 input_report_key(data->idev, KEY_RFKILL, 1);
121 input_sync(data->idev);
121 input_report_key(data->idev, KEY_RFKILL, 0); 122 input_report_key(data->idev, KEY_RFKILL, 0);
122 input_sync(data->idev); 123 input_sync(data->idev);
123} 124}
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index bf897b1832b1..cd4725e7e0b5 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -37,6 +37,7 @@
37 37
38struct quirk_entry { 38struct quirk_entry {
39 u8 touchpad_led; 39 u8 touchpad_led;
40 u8 kbd_led_levels_off_1;
40 41
41 int needs_kbd_timeouts; 42 int needs_kbd_timeouts;
42 /* 43 /*
@@ -67,6 +68,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = {
67 .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 }, 68 .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
68}; 69};
69 70
71static struct quirk_entry quirk_dell_latitude_e6410 = {
72 .kbd_led_levels_off_1 = 1,
73};
74
70static struct platform_driver platform_driver = { 75static struct platform_driver platform_driver = {
71 .driver = { 76 .driver = {
72 .name = "dell-laptop", 77 .name = "dell-laptop",
@@ -269,6 +274,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
269 }, 274 },
270 .driver_data = &quirk_dell_xps13_9333, 275 .driver_data = &quirk_dell_xps13_9333,
271 }, 276 },
277 {
278 .callback = dmi_matched,
279 .ident = "Dell Latitude E6410",
280 .matches = {
281 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
282 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
283 },
284 .driver_data = &quirk_dell_latitude_e6410,
285 },
272 { } 286 { }
273}; 287};
274 288
@@ -1149,6 +1163,9 @@ static int kbd_get_info(struct kbd_info *info)
1149 units = (buffer->output[2] >> 8) & 0xFF; 1163 units = (buffer->output[2] >> 8) & 0xFF;
1150 info->levels = (buffer->output[2] >> 16) & 0xFF; 1164 info->levels = (buffer->output[2] >> 16) & 0xFF;
1151 1165
1166 if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
1167 info->levels--;
1168
1152 if (units & BIT(0)) 1169 if (units & BIT(0))
1153 info->seconds = (buffer->output[3] >> 0) & 0xFF; 1170 info->seconds = (buffer->output[3] >> 0) & 0xFF;
1154 if (units & BIT(1)) 1171 if (units & BIT(1))
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 39d2f4518483..fb25b20df316 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -639,6 +639,8 @@ static int dell_wmi_events_set_enabled(bool enable)
639 int ret; 639 int ret;
640 640
641 buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL); 641 buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
642 if (!buffer)
643 return -ENOMEM;
642 buffer->cmd_class = CLASS_INFO; 644 buffer->cmd_class = CLASS_INFO;
643 buffer->cmd_select = SELECT_APP_REGISTRATION; 645 buffer->cmd_select = SELECT_APP_REGISTRATION;
644 buffer->input[0] = 0x10000; 646 buffer->input[0] = 0x10000;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 15015a24f8ad..badf42acbf95 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -565,9 +565,9 @@ enum qeth_cq {
565}; 565};
566 566
567struct qeth_ipato { 567struct qeth_ipato {
568 int enabled; 568 bool enabled;
569 int invert4; 569 bool invert4;
570 int invert6; 570 bool invert6;
571 struct list_head entries; 571 struct list_head entries;
572}; 572};
573 573
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 430e3214f7e2..6c815207f4f5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1480,9 +1480,9 @@ static int qeth_setup_card(struct qeth_card *card)
1480 qeth_set_intial_options(card); 1480 qeth_set_intial_options(card);
1481 /* IP address takeover */ 1481 /* IP address takeover */
1482 INIT_LIST_HEAD(&card->ipato.entries); 1482 INIT_LIST_HEAD(&card->ipato.entries);
1483 card->ipato.enabled = 0; 1483 card->ipato.enabled = false;
1484 card->ipato.invert4 = 0; 1484 card->ipato.invert4 = false;
1485 card->ipato.invert6 = 0; 1485 card->ipato.invert6 = false;
1486 /* init QDIO stuff */ 1486 /* init QDIO stuff */
1487 qeth_init_qdio_info(card); 1487 qeth_init_qdio_info(card);
1488 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); 1488 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 194ae9b577cc..e5833837b799 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -82,7 +82,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
82int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); 82int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
83void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, 83void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
84 const u8 *); 84 const u8 *);
85int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); 85void qeth_l3_update_ipato(struct qeth_card *card);
86struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); 86struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
87int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); 87int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
88int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *); 88int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6a73894b0cb5..ef0961e18686 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -164,8 +164,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
164 } 164 }
165} 165}
166 166
167int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, 167static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
168 struct qeth_ipaddr *addr) 168 struct qeth_ipaddr *addr)
169{ 169{
170 struct qeth_ipato_entry *ipatoe; 170 struct qeth_ipato_entry *ipatoe;
171 u8 addr_bits[128] = {0, }; 171 u8 addr_bits[128] = {0, };
@@ -174,6 +174,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
174 174
175 if (!card->ipato.enabled) 175 if (!card->ipato.enabled)
176 return 0; 176 return 0;
177 if (addr->type != QETH_IP_TYPE_NORMAL)
178 return 0;
177 179
178 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, 180 qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
179 (addr->proto == QETH_PROT_IPV4)? 4:16); 181 (addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -290,8 +292,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
290 memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); 292 memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
291 addr->ref_counter = 1; 293 addr->ref_counter = 1;
292 294
293 if (addr->type == QETH_IP_TYPE_NORMAL && 295 if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
294 qeth_l3_is_addr_covered_by_ipato(card, addr)) {
295 QETH_CARD_TEXT(card, 2, "tkovaddr"); 296 QETH_CARD_TEXT(card, 2, "tkovaddr");
296 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; 297 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
297 } 298 }
@@ -605,6 +606,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
605/* 606/*
606 * IP address takeover related functions 607 * IP address takeover related functions
607 */ 608 */
609
610/**
 611 * qeth_l3_update_ipato() - Update the 'takeover' property for all NORMAL IPs.
612 *
613 * Caller must hold ip_lock.
614 */
615void qeth_l3_update_ipato(struct qeth_card *card)
616{
617 struct qeth_ipaddr *addr;
618 unsigned int i;
619
620 hash_for_each(card->ip_htable, i, addr, hnode) {
621 if (addr->type != QETH_IP_TYPE_NORMAL)
622 continue;
623 if (qeth_l3_is_addr_covered_by_ipato(card, addr))
624 addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
625 else
626 addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
627 }
628}
629
608static void qeth_l3_clear_ipato_list(struct qeth_card *card) 630static void qeth_l3_clear_ipato_list(struct qeth_card *card)
609{ 631{
610 struct qeth_ipato_entry *ipatoe, *tmp; 632 struct qeth_ipato_entry *ipatoe, *tmp;
@@ -616,6 +638,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
616 kfree(ipatoe); 638 kfree(ipatoe);
617 } 639 }
618 640
641 qeth_l3_update_ipato(card);
619 spin_unlock_bh(&card->ip_lock); 642 spin_unlock_bh(&card->ip_lock);
620} 643}
621 644
@@ -640,8 +663,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
640 } 663 }
641 } 664 }
642 665
643 if (!rc) 666 if (!rc) {
644 list_add_tail(&new->entry, &card->ipato.entries); 667 list_add_tail(&new->entry, &card->ipato.entries);
668 qeth_l3_update_ipato(card);
669 }
645 670
646 spin_unlock_bh(&card->ip_lock); 671 spin_unlock_bh(&card->ip_lock);
647 672
@@ -664,6 +689,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
664 (proto == QETH_PROT_IPV4)? 4:16) && 689 (proto == QETH_PROT_IPV4)? 4:16) &&
665 (ipatoe->mask_bits == mask_bits)) { 690 (ipatoe->mask_bits == mask_bits)) {
666 list_del(&ipatoe->entry); 691 list_del(&ipatoe->entry);
692 qeth_l3_update_ipato(card);
667 kfree(ipatoe); 693 kfree(ipatoe);
668 } 694 }
669 } 695 }
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index bd12fdf678be..6ea2b528a64e 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
370 struct device_attribute *attr, const char *buf, size_t count) 370 struct device_attribute *attr, const char *buf, size_t count)
371{ 371{
372 struct qeth_card *card = dev_get_drvdata(dev); 372 struct qeth_card *card = dev_get_drvdata(dev);
373 struct qeth_ipaddr *addr; 373 bool enable;
374 int i, rc = 0; 374 int rc = 0;
375 375
376 if (!card) 376 if (!card)
377 return -EINVAL; 377 return -EINVAL;
@@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
384 } 384 }
385 385
386 if (sysfs_streq(buf, "toggle")) { 386 if (sysfs_streq(buf, "toggle")) {
387 card->ipato.enabled = (card->ipato.enabled)? 0 : 1; 387 enable = !card->ipato.enabled;
388 } else if (sysfs_streq(buf, "1")) { 388 } else if (kstrtobool(buf, &enable)) {
389 card->ipato.enabled = 1;
390 hash_for_each(card->ip_htable, i, addr, hnode) {
391 if ((addr->type == QETH_IP_TYPE_NORMAL) &&
392 qeth_l3_is_addr_covered_by_ipato(card, addr))
393 addr->set_flags |=
394 QETH_IPA_SETIP_TAKEOVER_FLAG;
395 }
396 } else if (sysfs_streq(buf, "0")) {
397 card->ipato.enabled = 0;
398 hash_for_each(card->ip_htable, i, addr, hnode) {
399 if (addr->set_flags &
400 QETH_IPA_SETIP_TAKEOVER_FLAG)
401 addr->set_flags &=
402 ~QETH_IPA_SETIP_TAKEOVER_FLAG;
403 }
404 } else
405 rc = -EINVAL; 389 rc = -EINVAL;
390 goto out;
391 }
392
393 if (card->ipato.enabled != enable) {
394 card->ipato.enabled = enable;
395 spin_lock_bh(&card->ip_lock);
396 qeth_l3_update_ipato(card);
397 spin_unlock_bh(&card->ip_lock);
398 }
406out: 399out:
407 mutex_unlock(&card->conf_mutex); 400 mutex_unlock(&card->conf_mutex);
408 return rc ? rc : count; 401 return rc ? rc : count;
@@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
428 const char *buf, size_t count) 421 const char *buf, size_t count)
429{ 422{
430 struct qeth_card *card = dev_get_drvdata(dev); 423 struct qeth_card *card = dev_get_drvdata(dev);
424 bool invert;
431 int rc = 0; 425 int rc = 0;
432 426
433 if (!card) 427 if (!card)
434 return -EINVAL; 428 return -EINVAL;
435 429
436 mutex_lock(&card->conf_mutex); 430 mutex_lock(&card->conf_mutex);
437 if (sysfs_streq(buf, "toggle")) 431 if (sysfs_streq(buf, "toggle")) {
438 card->ipato.invert4 = (card->ipato.invert4)? 0 : 1; 432 invert = !card->ipato.invert4;
439 else if (sysfs_streq(buf, "1")) 433 } else if (kstrtobool(buf, &invert)) {
440 card->ipato.invert4 = 1;
441 else if (sysfs_streq(buf, "0"))
442 card->ipato.invert4 = 0;
443 else
444 rc = -EINVAL; 434 rc = -EINVAL;
435 goto out;
436 }
437
438 if (card->ipato.invert4 != invert) {
439 card->ipato.invert4 = invert;
440 spin_lock_bh(&card->ip_lock);
441 qeth_l3_update_ipato(card);
442 spin_unlock_bh(&card->ip_lock);
443 }
444out:
445 mutex_unlock(&card->conf_mutex); 445 mutex_unlock(&card->conf_mutex);
446 return rc ? rc : count; 446 return rc ? rc : count;
447} 447}
@@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
607 struct device_attribute *attr, const char *buf, size_t count) 607 struct device_attribute *attr, const char *buf, size_t count)
608{ 608{
609 struct qeth_card *card = dev_get_drvdata(dev); 609 struct qeth_card *card = dev_get_drvdata(dev);
610 bool invert;
610 int rc = 0; 611 int rc = 0;
611 612
612 if (!card) 613 if (!card)
613 return -EINVAL; 614 return -EINVAL;
614 615
615 mutex_lock(&card->conf_mutex); 616 mutex_lock(&card->conf_mutex);
616 if (sysfs_streq(buf, "toggle")) 617 if (sysfs_streq(buf, "toggle")) {
617 card->ipato.invert6 = (card->ipato.invert6)? 0 : 1; 618 invert = !card->ipato.invert6;
618 else if (sysfs_streq(buf, "1")) 619 } else if (kstrtobool(buf, &invert)) {
619 card->ipato.invert6 = 1;
620 else if (sysfs_streq(buf, "0"))
621 card->ipato.invert6 = 0;
622 else
623 rc = -EINVAL; 620 rc = -EINVAL;
621 goto out;
622 }
623
624 if (card->ipato.invert6 != invert) {
625 card->ipato.invert6 = invert;
626 spin_lock_bh(&card->ip_lock);
627 qeth_l3_update_ipato(card);
628 spin_unlock_bh(&card->ip_lock);
629 }
630out:
624 mutex_unlock(&card->conf_mutex); 631 mutex_unlock(&card->conf_mutex);
625 return rc ? rc : count; 632 return rc ? rc : count;
626} 633}
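
The three sysfs store handlers above now accept "toggle" explicitly and hand everything else to kstrtobool(), updating the takeover flags only when the value actually changes. A userspace stand-in for the parsing part of that logic; kstrtobool() accepts more spellings than shown here, so this sketch only covers a few:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Accept "toggle", "1"/"on", "0"/"off"; anything else is an error. */
static int parse_enable(const char *buf, bool current, bool *out)
{
	if (strcmp(buf, "toggle") == 0)
		*out = !current;
	else if (strcmp(buf, "1") == 0 || strcmp(buf, "on") == 0)
		*out = true;
	else if (strcmp(buf, "0") == 0 || strcmp(buf, "off") == 0)
		*out = false;
	else
		return -1;	/* -EINVAL in the kernel version */
	return 0;
}

int main(void)
{
	bool enabled = false;

	if (parse_enable("toggle", enabled, &enabled) == 0)
		printf("enabled=%d\n", enabled);	/* prints enabled=1 */
	if (parse_enable("junk", enabled, &enabled))
		printf("rejected invalid input\n");
	return 0;
}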
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index bec9f3193f60..80a8cb26cdea 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2482,8 +2482,8 @@ int aac_command_thread(void *data)
2482 /* Synchronize our watches */ 2482 /* Synchronize our watches */
2483 if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) 2483 if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
2484 && (now.tv_nsec > (NSEC_PER_SEC / HZ))) 2484 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
2485 difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ) 2485 difference = HZ + HZ / 2 -
2486 + NSEC_PER_SEC / 2) / NSEC_PER_SEC; 2486 now.tv_nsec / (NSEC_PER_SEC / HZ);
2487 else { 2487 else {
2488 if (now.tv_nsec > NSEC_PER_SEC / 2) 2488 if (now.tv_nsec > NSEC_PER_SEC / 2)
2489 ++now.tv_sec; 2489 ++now.tv_sec;
@@ -2507,6 +2507,10 @@ int aac_command_thread(void *data)
2507 if (kthread_should_stop()) 2507 if (kthread_should_stop())
2508 break; 2508 break;
2509 2509
2510 /*
2511 * we probably want usleep_range() here instead of the
2512 * jiffies computation
2513 */
2510 schedule_timeout(difference); 2514 schedule_timeout(difference);
2511 2515
2512 if (kthread_should_stop()) 2516 if (kthread_should_stop())
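
The replacement arithmetic computes how many ticks to sleep: one and a half seconds' worth of ticks minus the ticks already elapsed in the current second, which works out to waking roughly half a second after the next second boundary. A standalone worked example; the HZ value of 250 is assumed only for illustration:

#include <stdio.h>

#define HZ 250			/* assumed tick rate, for illustration only */
#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long tv_nsec = 300000000L;	/* 0.3 s into the current second */
	long difference;

	/* same expression as in the patch: 1.5 s worth of ticks, minus the
	 * ticks already elapsed in the current second */
	difference = HZ + HZ / 2 - tv_nsec / (NSEC_PER_SEC / HZ);

	printf("sleep %ld ticks (%.3f s)\n", difference,
	       (double)difference / HZ);	/* 300 ticks = 1.200 s */
	return 0;
}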
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 72ca2a2e08e2..b2fa195adc7a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
3135 struct fc_bsg_request *bsg_request = job->request; 3135 struct fc_bsg_request *bsg_request = job->request;
3136 struct fc_bsg_reply *bsg_reply = job->reply; 3136 struct fc_bsg_reply *bsg_reply = job->reply;
3137 uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; 3137 uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
3138 struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); 3138 struct Scsi_Host *shost = fc_bsg_to_shost(job);
3139 struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3139 struct bfad_s *bfad = im_port->bfad; 3140 struct bfad_s *bfad = im_port->bfad;
3140 void *payload_kbuf; 3141 void *payload_kbuf;
3141 int rc = -EINVAL; 3142 int rc = -EINVAL;
@@ -3350,7 +3351,8 @@ int
3350bfad_im_bsg_els_ct_request(struct bsg_job *job) 3351bfad_im_bsg_els_ct_request(struct bsg_job *job)
3351{ 3352{
3352 struct bfa_bsg_data *bsg_data; 3353 struct bfa_bsg_data *bsg_data;
3353 struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); 3354 struct Scsi_Host *shost = fc_bsg_to_shost(job);
3355 struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3354 struct bfad_s *bfad = im_port->bfad; 3356 struct bfad_s *bfad = im_port->bfad;
3355 bfa_bsg_fcpt_t *bsg_fcpt; 3357 bfa_bsg_fcpt_t *bsg_fcpt;
3356 struct bfad_fcxp *drv_fcxp; 3358 struct bfad_fcxp *drv_fcxp;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 24e657a4ec80..c05d6e91e4bd 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -546,6 +546,7 @@ int
546bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, 546bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
547 struct device *dev) 547 struct device *dev)
548{ 548{
549 struct bfad_im_port_pointer *im_portp;
549 int error = 1; 550 int error = 1;
550 551
551 mutex_lock(&bfad_mutex); 552 mutex_lock(&bfad_mutex);
@@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
564 goto out_free_idr; 565 goto out_free_idr;
565 } 566 }
566 567
567 im_port->shost->hostdata[0] = (unsigned long)im_port; 568 im_portp = shost_priv(im_port->shost);
569 im_portp->p = im_port;
568 im_port->shost->unique_id = im_port->idr_id; 570 im_port->shost->unique_id = im_port->idr_id;
569 im_port->shost->this_id = -1; 571 im_port->shost->this_id = -1;
570 im_port->shost->max_id = MAX_FCP_TARGET; 572 im_port->shost->max_id = MAX_FCP_TARGET;
@@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
748 750
749 sht->sg_tablesize = bfad->cfg_data.io_max_sge; 751 sht->sg_tablesize = bfad->cfg_data.io_max_sge;
750 752
751 return scsi_host_alloc(sht, sizeof(unsigned long)); 753 return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
752} 754}
753 755
754void 756void
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index c81ec2a77ef5..06ce4ba2b7bc 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -69,6 +69,16 @@ struct bfad_im_port_s {
69 struct fc_vport *fc_vport; 69 struct fc_vport *fc_vport;
70}; 70};
71 71
72struct bfad_im_port_pointer {
73 struct bfad_im_port_s *p;
74};
75
76static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
77{
78 struct bfad_im_port_pointer *im_portp = shost_priv(host);
79 return im_portp->p;
80}
81
72enum bfad_itnim_state { 82enum bfad_itnim_state {
73 ITNIM_STATE_NONE, 83 ITNIM_STATE_NONE,
74 ITNIM_STATE_ONLINE, 84 ITNIM_STATE_ONLINE,
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 5da46052e179..21be672679fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -904,10 +904,14 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
904 case ELS_FLOGI: 904 case ELS_FLOGI:
905 if (!lport->point_to_multipoint) 905 if (!lport->point_to_multipoint)
906 fc_lport_recv_flogi_req(lport, fp); 906 fc_lport_recv_flogi_req(lport, fp);
907 else
908 fc_rport_recv_req(lport, fp);
907 break; 909 break;
908 case ELS_LOGO: 910 case ELS_LOGO:
909 if (fc_frame_sid(fp) == FC_FID_FLOGI) 911 if (fc_frame_sid(fp) == FC_FID_FLOGI)
910 fc_lport_recv_logo_req(lport, fp); 912 fc_lport_recv_logo_req(lport, fp);
913 else
914 fc_rport_recv_req(lport, fp);
911 break; 915 break;
912 case ELS_RSCN: 916 case ELS_RSCN:
913 lport->tt.disc_recv_req(lport, fp); 917 lport->tt.disc_recv_req(lport, fp);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index ca1566237ae7..3183d63de4da 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2145 struct sas_rphy *rphy) 2145 struct sas_rphy *rphy)
2146{ 2146{
2147 struct domain_device *dev; 2147 struct domain_device *dev;
2148 unsigned int reslen = 0; 2148 unsigned int rcvlen = 0;
2149 int ret = -EINVAL; 2149 int ret = -EINVAL;
2150 2150
2151 /* no rphy means no smp target support (ie aic94xx host) */ 2151 /* no rphy means no smp target support (ie aic94xx host) */
@@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
2179 2179
2180 ret = smp_execute_task_sg(dev, job->request_payload.sg_list, 2180 ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
2181 job->reply_payload.sg_list); 2181 job->reply_payload.sg_list);
2182 if (ret > 0) { 2182 if (ret >= 0) {
2183 /* positive number is the untransferred residual */ 2183 /* bsg_job_done() requires the length received */
2184 reslen = ret; 2184 rcvlen = job->reply_payload.payload_len - ret;
2185 ret = 0; 2185 ret = 0;
2186 } 2186 }
2187 2187
2188out: 2188out:
2189 bsg_job_done(job, ret, reslen); 2189 bsg_job_done(job, ret, rcvlen);
2190} 2190}
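bsg_job_done() takes the number of reply bytes actually received, whereas smp_execute_task_sg() reports the untransferred residual on success, so the handler above now converts one into the other instead of passing the residual through. A tiny standalone sketch of that conversion, with made-up numbers:

#include <stdio.h>

int main(void)
{
    unsigned int payload_len = 1024;   /* job->reply_payload.payload_len */
    int residual = 24;                 /* >= 0 result from the SMP transfer */
    unsigned int rcvlen = payload_len - residual;

    printf("received %u of %u bytes (residual %d)\n", rcvlen, payload_len, residual);
    return 0;
}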
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 56faeb049b4a..87c08ff37ddd 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
753 drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); 753 drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
754 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); 754 rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
755 if (rc < 0) { 755 if (rc < 0) {
756 (rqbp->rqb_free_buffer)(phba, rqb_entry);
757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
758 "6409 Cannot post to RQ %d: %x %x\n", 757 "6409 Cannot post to RQ %d: %x %x\n",
759 rqb_entry->hrq->queue_id, 758 rqb_entry->hrq->queue_id,
760 rqb_entry->hrq->host_index, 759 rqb_entry->hrq->host_index,
761 rqb_entry->hrq->hba_index); 760 rqb_entry->hrq->hba_index);
761 (rqbp->rqb_free_buffer)(phba, rqb_entry);
762 } else { 762 } else {
763 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); 763 list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
764 rqbp->buffer_count++; 764 rqbp->buffer_count++;
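The reordering in lpfc_rq_buf_free() matters because rqb_free_buffer() may release the entry whose queue fields the log message dereferences; logging first and freeing last avoids reading freed memory. An illustrative userspace sketch of the same rule, with free() standing in for the driver's buffer-free callback:

#include <stdio.h>
#include <stdlib.h>

struct buf {
    int queue_id;
};

static void report_and_free(struct buf *b)
{
    /* Consume the fields for diagnostics while the buffer is still valid... */
    fprintf(stderr, "cannot post to RQ %d\n", b->queue_id);
    /* ...and only then hand it back. */
    free(b);
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));

    b->queue_id = 7;
    report_and_free(b);
    return 0;
}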
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 01f08c03f2c1..c3765d29fd3f 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq)
8{ 8{
9 struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req); 9 struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
10 int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); 10 int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
11 char buf[80]; 11 const u8 *const cdb = READ_ONCE(cmd->cmnd);
12 char buf[80] = "(?)";
12 13
13 __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); 14 if (cdb)
15 __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
14 seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf, 16 seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
15 cmd->retries, msecs / 1000, msecs % 1000); 17 cmd->retries, msecs / 1000, msecs % 1000);
16} 18}
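The scsi_debugfs change snapshots cmd->cmnd once with READ_ONCE() and defaults the output to "(?)", so a command being torn down concurrently can no longer be formatted through a pointer that just became NULL. A rough userspace sketch of the snapshot-then-check idea; READ_ONCE() is approximated here with a volatile read (using the GCC typeof extension), which only illustrates the intent:

#include <stdio.h>

#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))

static char *shared_cmd;    /* may be cleared by another context at any time */

static void show_cmd(void)
{
    char buf[80] = "(?)";                       /* printed when nothing is attached */
    const char *cdb = READ_ONCE(shared_cmd);    /* single snapshot of the pointer */

    if (cdb)
        snprintf(buf, sizeof(buf), "%s", cdb);
    printf(".cmd=%s\n", buf);
}

int main(void)
{
    show_cmd();                          /* prints "(?)" */
    shared_cmd = "READ(10) lba=0 len=8";
    show_cmd();                          /* prints the snapshotted command */
    return 0;
}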
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 78d4aa8df675..449ef5adbb2b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -34,7 +34,6 @@ struct scsi_dev_info_list_table {
34}; 34};
35 35
36 36
37static const char spaces[] = " "; /* 16 of them */
38static blist_flags_t scsi_default_dev_flags; 37static blist_flags_t scsi_default_dev_flags;
39static LIST_HEAD(scsi_dev_info_list); 38static LIST_HEAD(scsi_dev_info_list);
40static char scsi_dev_flags[256]; 39static char scsi_dev_flags[256];
@@ -298,20 +297,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
298 size_t from_length; 297 size_t from_length;
299 298
300 from_length = strlen(from); 299 from_length = strlen(from);
301 strncpy(to, from, min(to_length, from_length)); 300 /* This zero-pads the destination */
302 if (from_length < to_length) { 301 strncpy(to, from, to_length);
303 if (compatible) { 302 if (from_length < to_length && !compatible) {
304 /* 303 /*
305 * NUL terminate the string if it is short. 304 * space pad the string if it is short.
306 */ 305 */
307 to[from_length] = '\0'; 306 memset(&to[from_length], ' ', to_length - from_length);
308 } else {
309 /*
310 * space pad the string if it is short.
311 */
312 strncpy(&to[from_length], spaces,
313 to_length - from_length);
314 }
315 } 307 }
316 if (from_length > to_length) 308 if (from_length > to_length)
317 printk(KERN_WARNING "%s: %s string '%s' is too long\n", 309 printk(KERN_WARNING "%s: %s string '%s' is too long\n",
@@ -458,7 +450,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
458 /* 450 /*
459 * vendor strings must be an exact match 451 * vendor strings must be an exact match
460 */ 452 */
461 if (vmax != strlen(devinfo->vendor) || 453 if (vmax != strnlen(devinfo->vendor,
454 sizeof(devinfo->vendor)) ||
462 memcmp(devinfo->vendor, vskip, vmax)) 455 memcmp(devinfo->vendor, vskip, vmax))
463 continue; 456 continue;
464 457
@@ -466,7 +459,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
466 * @model specifies the full string, and 459 * @model specifies the full string, and
467 * must be larger or equal to devinfo->model 460 * must be larger or equal to devinfo->model
468 */ 461 */
469 mlen = strlen(devinfo->model); 462 mlen = strnlen(devinfo->model, sizeof(devinfo->model));
470 if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) 463 if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
471 continue; 464 continue;
472 return devinfo; 465 return devinfo;
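The scsi_devinfo rework leans on two C library guarantees: strncpy() zero-fills the remainder of the destination when the source is shorter, so only the space-padded (!compatible) form needs an explicit memset(), and strnlen() keeps comparisons inside fixed-size fields that may not be NUL terminated. A small standalone illustration of both, with an invented 8-byte field:

#include <stdio.h>
#include <string.h>

static void copy_devinfo(const char *from, char *to, size_t to_len, int compatible)
{
    size_t from_len = strlen(from);

    strncpy(to, from, to_len);          /* zero-pads when from_len < to_len */
    if (from_len < to_len && !compatible)
        memset(&to[from_len], ' ', to_len - from_len);   /* space pad instead */
    if (from_len > to_len)
        fprintf(stderr, "string '%s' is too long\n", from);
}

int main(void)
{
    char vendor[8];

    copy_devinfo("ACME", vendor, sizeof(vendor), 1);
    printf("compatible: [%.*s] strnlen=%zu\n",
           (int)sizeof(vendor), vendor, strnlen(vendor, sizeof(vendor)));

    copy_devinfo("ACME", vendor, sizeof(vendor), 0);
    printf("padded:     [%.*s] strnlen=%zu\n",
           (int)sizeof(vendor), vendor, strnlen(vendor, sizeof(vendor)));
    return 0;
}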
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 00742c50cd44..d9ca1dfab154 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1967,6 +1967,8 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
1967out_put_device: 1967out_put_device:
1968 put_device(&sdev->sdev_gendev); 1968 put_device(&sdev->sdev_gendev);
1969out: 1969out:
1970 if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
1971 blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
1970 return false; 1972 return false;
1971} 1973}
1972 1974
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 24fe68522716..a028ab3322a9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1312,6 +1312,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
1312static void sd_uninit_command(struct scsi_cmnd *SCpnt) 1312static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1313{ 1313{
1314 struct request *rq = SCpnt->request; 1314 struct request *rq = SCpnt->request;
1315 u8 *cmnd;
1315 1316
1316 if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) 1317 if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
1317 sd_zbc_write_unlock_zone(SCpnt); 1318 sd_zbc_write_unlock_zone(SCpnt);
@@ -1320,9 +1321,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1320 __free_page(rq->special_vec.bv_page); 1321 __free_page(rq->special_vec.bv_page);
1321 1322
1322 if (SCpnt->cmnd != scsi_req(rq)->cmd) { 1323 if (SCpnt->cmnd != scsi_req(rq)->cmd) {
1323 mempool_free(SCpnt->cmnd, sd_cdb_pool); 1324 cmnd = SCpnt->cmnd;
1324 SCpnt->cmnd = NULL; 1325 SCpnt->cmnd = NULL;
1325 SCpnt->cmd_len = 0; 1326 SCpnt->cmd_len = 0;
1327 mempool_free(cmnd, sd_cdb_pool);
1326 } 1328 }
1327} 1329}
1328 1330
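sd_uninit_command() now detaches the CDB pointer from the command before returning the buffer to the mempool, closing the window in which SCpnt->cmnd still pointed at memory that had already been freed. A userspace sketch of the detach-then-free ordering, with plain free() standing in for mempool_free():

#include <stdlib.h>
#include <string.h>

struct cmd {
    unsigned char *cmnd;
    unsigned short cmd_len;
};

static void uninit_cmd(struct cmd *c)
{
    unsigned char *cmnd = c->cmnd;   /* take a local copy */

    c->cmnd = NULL;                  /* detach from the shared structure first */
    c->cmd_len = 0;
    free(cmnd);                      /* stands in for mempool_free() */
}

int main(void)
{
    struct cmd c = { .cmnd = malloc(32), .cmd_len = 32 };

    memset(c.cmnd, 0, 32);
    uninit_cmd(&c);
    return 0;
}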
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index 1799d3f26a9e..2035835b62dc 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -1769,7 +1769,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
1769 struct device *dev = drvdata_to_dev(ctx->drvdata); 1769 struct device *dev = drvdata_to_dev(ctx->drvdata);
1770 struct ahash_req_ctx *state = ahash_request_ctx(req); 1770 struct ahash_req_ctx *state = ahash_request_ctx(req);
1771 u32 tmp; 1771 u32 tmp;
1772 int rc; 1772 int rc = 0;
1773 1773
1774 memcpy(&tmp, in, sizeof(u32)); 1774 memcpy(&tmp, in, sizeof(u32));
1775 if (tmp != CC_EXPORT_MAGIC) { 1775 if (tmp != CC_EXPORT_MAGIC) {
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index e69a2153c999..12c9df9cddde 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -102,7 +102,7 @@ enum modulation rf69_get_modulation(struct spi_device *spi)
102 102
103 currentValue = READ_REG(REG_DATAMODUL); 103 currentValue = READ_REG(REG_DATAMODUL);
104 104
105 switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) { // TODO improvement: change 3 to define 105 switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
106 case DATAMODUL_MODULATION_TYPE_OOK: return OOK; 106 case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
107 case DATAMODUL_MODULATION_TYPE_FSK: return FSK; 107 case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
108 default: return undefined; 108 default: return undefined;
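The rf69 fix removes a classic precedence bug: '>>' binds more tightly than '&', so the old expression masked the register with a shifted mask instead of shifting the masked value; the new code simply switches on the masked value against unshifted case constants. A runnable demonstration with invented register constants (not the real RF69 layout):

#include <stdio.h>

#define MASK_MOD_TYPE  0x18    /* two-bit field at bits 4:3, for illustration */

int main(void)
{
    unsigned int reg = 0x08;   /* field value 1 stored in bits 4:3 */

    unsigned int buggy     = reg & MASK_MOD_TYPE >> 3;    /* reg & 0x03 == 0 */
    unsigned int intended  = (reg & MASK_MOD_TYPE) >> 3;  /* == 1 */
    unsigned int unshifted = reg & MASK_MOD_TYPE;         /* == 0x08, what the fix compares */

    printf("buggy=%u intended=%u unshifted=0x%02x\n", buggy, intended, unshifted);
    return 0;
}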
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index a60ae778ccb8..e9843c53fe31 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -240,12 +240,12 @@ static int optee_open(struct tee_context *ctx)
240 if (teedev == optee->supp_teedev) { 240 if (teedev == optee->supp_teedev) {
241 bool busy = true; 241 bool busy = true;
242 242
243 mutex_lock(&optee->supp.ctx_mutex); 243 mutex_lock(&optee->supp.mutex);
244 if (!optee->supp.ctx) { 244 if (!optee->supp.ctx) {
245 busy = false; 245 busy = false;
246 optee->supp.ctx = ctx; 246 optee->supp.ctx = ctx;
247 } 247 }
248 mutex_unlock(&optee->supp.ctx_mutex); 248 mutex_unlock(&optee->supp.mutex);
249 if (busy) { 249 if (busy) {
250 kfree(ctxdata); 250 kfree(ctxdata);
251 return -EBUSY; 251 return -EBUSY;
@@ -305,11 +305,8 @@ static void optee_release(struct tee_context *ctx)
305 305
306 ctx->data = NULL; 306 ctx->data = NULL;
307 307
308 if (teedev == optee->supp_teedev) { 308 if (teedev == optee->supp_teedev)
309 mutex_lock(&optee->supp.ctx_mutex); 309 optee_supp_release(&optee->supp);
310 optee->supp.ctx = NULL;
311 mutex_unlock(&optee->supp.ctx_mutex);
312 }
313} 310}
314 311
315static const struct tee_driver_ops optee_ops = { 312static const struct tee_driver_ops optee_ops = {
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index de7962ebc1b6..a85a24725e31 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -53,36 +53,24 @@ struct optee_wait_queue {
53 * @ctx the context of current connected supplicant. 53 * @ctx the context of current connected supplicant.
54 * if !NULL the supplicant device is available for use, 54 * if !NULL the supplicant device is available for use,
55 * else busy 55 * else busy
56 * @ctx_mutex: held while accessing @ctx 56 * @mutex: held while accessing content of this struct
57 * @func: supplicant function id to call 57 * @req_id: current request id if supplicant is doing synchronous
58 * @ret: call return value 58 * communication, else -1
59 * @num_params: number of elements in @param 59 * @reqs: queued requests not yet retrieved by supplicant
60 * @param: parameters for @func 60 * @idr: IDR holding all requests currently being processed
61 * @req_posted: if true, a request has been posted to the supplicant 61 * by supplicant
62 * @supp_next_send: if true, next step is for supplicant to send response 62 * @reqs_c: completion used by supplicant when waiting for a
63 * @thrd_mutex: held by the thread doing a request to supplicant 63 * request to be queued.
64 * @supp_mutex: held by supplicant while operating on this struct
65 * @data_to_supp: supplicant is waiting on this for next request
66 * @data_from_supp: requesting thread is waiting on this to get the result
67 */ 64 */
68struct optee_supp { 65struct optee_supp {
66 /* Serializes access to this struct */
67 struct mutex mutex;
69 struct tee_context *ctx; 68 struct tee_context *ctx;
70 /* Serializes access of ctx */ 69
71 struct mutex ctx_mutex; 70 int req_id;
72 71 struct list_head reqs;
73 u32 func; 72 struct idr idr;
74 u32 ret; 73 struct completion reqs_c;
75 size_t num_params;
76 struct tee_param *param;
77
78 bool req_posted;
79 bool supp_next_send;
80 /* Serializes access to this struct for requesting thread */
81 struct mutex thrd_mutex;
82 /* Serializes access to this struct for supplicant threads */
83 struct mutex supp_mutex;
84 struct completion data_to_supp;
85 struct completion data_from_supp;
86}; 74};
87 75
88/** 76/**
@@ -154,6 +142,7 @@ int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
154int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len); 142int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
155void optee_supp_init(struct optee_supp *supp); 143void optee_supp_init(struct optee_supp *supp);
156void optee_supp_uninit(struct optee_supp *supp); 144void optee_supp_uninit(struct optee_supp *supp);
145void optee_supp_release(struct optee_supp *supp);
157 146
158int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, 147int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
159 struct tee_param *param); 148 struct tee_param *param);
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index 690e48a61aca..41aea12e2bcc 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -192,10 +192,10 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
192 if (ret) 192 if (ret)
193 return ERR_PTR(-ENOMEM); 193 return ERR_PTR(-ENOMEM);
194 194
195 mutex_lock(&optee->supp.ctx_mutex); 195 mutex_lock(&optee->supp.mutex);
196 /* Increases count as secure world doesn't have a reference */ 196 /* Increases count as secure world doesn't have a reference */
197 shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c); 197 shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
198 mutex_unlock(&optee->supp.ctx_mutex); 198 mutex_unlock(&optee->supp.mutex);
199 return shm; 199 return shm;
200} 200}
201 201
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index b4ea0678a436..df35fc01fd3e 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -16,21 +16,61 @@
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include "optee_private.h" 17#include "optee_private.h"
18 18
19struct optee_supp_req {
20 struct list_head link;
21
22 bool busy;
23 u32 func;
24 u32 ret;
25 size_t num_params;
26 struct tee_param *param;
27
28 struct completion c;
29};
30
19void optee_supp_init(struct optee_supp *supp) 31void optee_supp_init(struct optee_supp *supp)
20{ 32{
21 memset(supp, 0, sizeof(*supp)); 33 memset(supp, 0, sizeof(*supp));
22 mutex_init(&supp->ctx_mutex); 34 mutex_init(&supp->mutex);
23 mutex_init(&supp->thrd_mutex); 35 init_completion(&supp->reqs_c);
24 mutex_init(&supp->supp_mutex); 36 idr_init(&supp->idr);
25 init_completion(&supp->data_to_supp); 37 INIT_LIST_HEAD(&supp->reqs);
26 init_completion(&supp->data_from_supp); 38 supp->req_id = -1;
27} 39}
28 40
29void optee_supp_uninit(struct optee_supp *supp) 41void optee_supp_uninit(struct optee_supp *supp)
30{ 42{
31 mutex_destroy(&supp->ctx_mutex); 43 mutex_destroy(&supp->mutex);
32 mutex_destroy(&supp->thrd_mutex); 44 idr_destroy(&supp->idr);
33 mutex_destroy(&supp->supp_mutex); 45}
46
47void optee_supp_release(struct optee_supp *supp)
48{
49 int id;
50 struct optee_supp_req *req;
51 struct optee_supp_req *req_tmp;
52
53 mutex_lock(&supp->mutex);
54
55 /* Abort all requests retrieved by supplicant */
56 idr_for_each_entry(&supp->idr, req, id) {
57 req->busy = false;
58 idr_remove(&supp->idr, id);
59 req->ret = TEEC_ERROR_COMMUNICATION;
60 complete(&req->c);
61 }
62
63 /* Abort all queued requests */
64 list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
65 list_del(&req->link);
66 req->ret = TEEC_ERROR_COMMUNICATION;
67 complete(&req->c);
68 }
69
70 supp->ctx = NULL;
71 supp->req_id = -1;
72
73 mutex_unlock(&supp->mutex);
34} 74}
35 75
36/** 76/**
@@ -44,53 +84,42 @@ void optee_supp_uninit(struct optee_supp *supp)
44 */ 84 */
45u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, 85u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
46 struct tee_param *param) 86 struct tee_param *param)
87
47{ 88{
48 bool interruptable;
49 struct optee *optee = tee_get_drvdata(ctx->teedev); 89 struct optee *optee = tee_get_drvdata(ctx->teedev);
50 struct optee_supp *supp = &optee->supp; 90 struct optee_supp *supp = &optee->supp;
91 struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
92 bool interruptable;
51 u32 ret; 93 u32 ret;
52 94
53 /* 95 if (!req)
54 * Other threads blocks here until we've copied our answer from 96 return TEEC_ERROR_OUT_OF_MEMORY;
55 * supplicant.
56 */
57 while (mutex_lock_interruptible(&supp->thrd_mutex)) {
58 /* See comment below on when the RPC can be interrupted. */
59 mutex_lock(&supp->ctx_mutex);
60 interruptable = !supp->ctx;
61 mutex_unlock(&supp->ctx_mutex);
62 if (interruptable)
63 return TEEC_ERROR_COMMUNICATION;
64 }
65 97
66 /* 98 init_completion(&req->c);
67 * We have exclusive access now since the supplicant at this 99 req->func = func;
68 * point is either doing a 100 req->num_params = num_params;
69 * wait_for_completion_interruptible(&supp->data_to_supp) or is in 101 req->param = param;
70 * userspace still about to do the ioctl() to enter
71 * optee_supp_recv() below.
72 */
73 102
74 supp->func = func; 103 /* Insert the request in the request list */
75 supp->num_params = num_params; 104 mutex_lock(&supp->mutex);
76 supp->param = param; 105 list_add_tail(&req->link, &supp->reqs);
77 supp->req_posted = true; 106 mutex_unlock(&supp->mutex);
78 107
79 /* Let supplicant get the data */ 108 /* Tell an eventual waiter there's a new request */
80 complete(&supp->data_to_supp); 109 complete(&supp->reqs_c);
81 110
82 /* 111 /*
83 * Wait for supplicant to process and return result, once we've 112 * Wait for supplicant to process and return result, once we've
84 * returned from wait_for_completion(data_from_supp) we have 113 * returned from wait_for_completion(&req->c) successfully we have
85 * exclusive access again. 114 * exclusive access again.
86 */ 115 */
87 while (wait_for_completion_interruptible(&supp->data_from_supp)) { 116 while (wait_for_completion_interruptible(&req->c)) {
88 mutex_lock(&supp->ctx_mutex); 117 mutex_lock(&supp->mutex);
89 interruptable = !supp->ctx; 118 interruptable = !supp->ctx;
90 if (interruptable) { 119 if (interruptable) {
91 /* 120 /*
92 * There's no supplicant available and since the 121 * There's no supplicant available and since the
93 * supp->ctx_mutex currently is held none can 122 * supp->mutex currently is held none can
94 * become available until the mutex released 123 * become available until the mutex released
95 * again. 124 * again.
96 * 125 *
@@ -101,24 +130,91 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
101 * will serve all requests in a timely manner and 130 * will serve all requests in a timely manner and
102 * interrupting then wouldn't make sense. 131 * interrupting then wouldn't make sense.
103 */ 132 */
104 supp->ret = TEEC_ERROR_COMMUNICATION; 133 interruptable = !req->busy;
105 init_completion(&supp->data_to_supp); 134 if (!req->busy)
135 list_del(&req->link);
106 } 136 }
107 mutex_unlock(&supp->ctx_mutex); 137 mutex_unlock(&supp->mutex);
108 if (interruptable) 138
139 if (interruptable) {
140 req->ret = TEEC_ERROR_COMMUNICATION;
109 break; 141 break;
142 }
110 } 143 }
111 144
112 ret = supp->ret; 145 ret = req->ret;
113 supp->param = NULL; 146 kfree(req);
114 supp->req_posted = false;
115
116 /* We're done, let someone else talk to the supplicant now. */
117 mutex_unlock(&supp->thrd_mutex);
118 147
119 return ret; 148 return ret;
120} 149}
121 150
151static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
152 int num_params, int *id)
153{
154 struct optee_supp_req *req;
155
156 if (supp->req_id != -1) {
157 /*
158 * Supplicant should not mix synchronous and asynchronous
159 * requests.
160 */
161 return ERR_PTR(-EINVAL);
162 }
163
164 if (list_empty(&supp->reqs))
165 return NULL;
166
167 req = list_first_entry(&supp->reqs, struct optee_supp_req, link);
168
169 if (num_params < req->num_params) {
170 /* Not enough room for parameters */
171 return ERR_PTR(-EINVAL);
172 }
173
174 *id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
175 if (*id < 0)
176 return ERR_PTR(-ENOMEM);
177
178 list_del(&req->link);
179 req->busy = true;
180
181 return req;
182}
183
184static int supp_check_recv_params(size_t num_params, struct tee_param *params,
185 size_t *num_meta)
186{
187 size_t n;
188
189 if (!num_params)
190 return -EINVAL;
191
192 /*
193 * If there's memrefs we need to decrease those as they were
194 * increased earlier and we'll even refuse to accept any below.
195 */
196 for (n = 0; n < num_params; n++)
197 if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
198 tee_shm_put(params[n].u.memref.shm);
199
200 /*
201 * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
202 * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
203 */
204 for (n = 0; n < num_params; n++)
205 if (params[n].attr &&
206 params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
207 return -EINVAL;
208
209 /* At most we'll need one meta parameter so no need to check for more */
210 if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
211 *num_meta = 1;
212 else
213 *num_meta = 0;
214
215 return 0;
216}
217
122/** 218/**
123 * optee_supp_recv() - receive request for supplicant 219 * optee_supp_recv() - receive request for supplicant
124 * @ctx: context receiving the request 220 * @ctx: context receiving the request
@@ -135,65 +231,99 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
135 struct tee_device *teedev = ctx->teedev; 231 struct tee_device *teedev = ctx->teedev;
136 struct optee *optee = tee_get_drvdata(teedev); 232 struct optee *optee = tee_get_drvdata(teedev);
137 struct optee_supp *supp = &optee->supp; 233 struct optee_supp *supp = &optee->supp;
234 struct optee_supp_req *req = NULL;
235 int id;
236 size_t num_meta;
138 int rc; 237 int rc;
139 238
140 /* 239 rc = supp_check_recv_params(*num_params, param, &num_meta);
141 * In case two threads in one supplicant is calling this function 240 if (rc)
142 * simultaneously we need to protect the data with a mutex which 241 return rc;
143 * we'll release before returning. 242
144 */ 243 while (true) {
145 mutex_lock(&supp->supp_mutex); 244 mutex_lock(&supp->mutex);
245 req = supp_pop_entry(supp, *num_params - num_meta, &id);
246 mutex_unlock(&supp->mutex);
247
248 if (req) {
249 if (IS_ERR(req))
250 return PTR_ERR(req);
251 break;
252 }
146 253
147 if (supp->supp_next_send) {
148 /* 254 /*
149 * optee_supp_recv() has been called again without 255 * If we didn't get a request we'll block in
150 * a optee_supp_send() in between. Supplicant has 256 * wait_for_completion() to avoid needless spinning.
151 * probably been restarted before it was able to 257 *
152 * write back last result. Abort last request and 258 * This is where supplicant will be hanging most of
153 * wait for a new. 259 * the time, let's make this interruptable so we
260 * can easily restart supplicant if needed.
154 */ 261 */
155 if (supp->req_posted) { 262 if (wait_for_completion_interruptible(&supp->reqs_c))
156 supp->ret = TEEC_ERROR_COMMUNICATION; 263 return -ERESTARTSYS;
157 supp->supp_next_send = false;
158 complete(&supp->data_from_supp);
159 }
160 } 264 }
161 265
162 /* 266 if (num_meta) {
163 * This is where supplicant will be hanging most of the 267 /*
164 * time, let's make this interruptable so we can easily 268 * tee-supplicant support meta parameters -> requsts can be
165 * restart supplicant if needed. 269 * processed asynchronously.
166 */ 270 */
167 if (wait_for_completion_interruptible(&supp->data_to_supp)) { 271 param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
168 rc = -ERESTARTSYS; 272 TEE_IOCTL_PARAM_ATTR_META;
169 goto out; 273 param->u.value.a = id;
274 param->u.value.b = 0;
275 param->u.value.c = 0;
276 } else {
277 mutex_lock(&supp->mutex);
278 supp->req_id = id;
279 mutex_unlock(&supp->mutex);
170 } 280 }
171 281
172 /* We have exlusive access to the data */ 282 *func = req->func;
283 *num_params = req->num_params + num_meta;
284 memcpy(param + num_meta, req->param,
285 sizeof(struct tee_param) * req->num_params);
173 286
174 if (*num_params < supp->num_params) { 287 return 0;
175 /* 288}
176 * Not enough room for parameters, tell supplicant 289
177 * it failed and abort last request. 290static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
178 */ 291 size_t num_params,
179 supp->ret = TEEC_ERROR_COMMUNICATION; 292 struct tee_param *param,
180 rc = -EINVAL; 293 size_t *num_meta)
181 complete(&supp->data_from_supp); 294{
182 goto out; 295 struct optee_supp_req *req;
296 int id;
297 size_t nm;
298 const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
299 TEE_IOCTL_PARAM_ATTR_META;
300
301 if (!num_params)
302 return ERR_PTR(-EINVAL);
303
304 if (supp->req_id == -1) {
305 if (param->attr != attr)
306 return ERR_PTR(-EINVAL);
307 id = param->u.value.a;
308 nm = 1;
309 } else {
310 id = supp->req_id;
311 nm = 0;
183 } 312 }
184 313
185 *func = supp->func; 314 req = idr_find(&supp->idr, id);
186 *num_params = supp->num_params; 315 if (!req)
187 memcpy(param, supp->param, 316 return ERR_PTR(-ENOENT);
188 sizeof(struct tee_param) * supp->num_params);
189 317
190 /* Allow optee_supp_send() below to do its work */ 318 if ((num_params - nm) != req->num_params)
191 supp->supp_next_send = true; 319 return ERR_PTR(-EINVAL);
192 320
193 rc = 0; 321 req->busy = false;
194out: 322 idr_remove(&supp->idr, id);
195 mutex_unlock(&supp->supp_mutex); 323 supp->req_id = -1;
196 return rc; 324 *num_meta = nm;
325
326 return req;
197} 327}
198 328
199/** 329/**
@@ -211,63 +341,42 @@ int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
211 struct tee_device *teedev = ctx->teedev; 341 struct tee_device *teedev = ctx->teedev;
212 struct optee *optee = tee_get_drvdata(teedev); 342 struct optee *optee = tee_get_drvdata(teedev);
213 struct optee_supp *supp = &optee->supp; 343 struct optee_supp *supp = &optee->supp;
344 struct optee_supp_req *req;
214 size_t n; 345 size_t n;
215 int rc = 0; 346 size_t num_meta;
216 347
217 /* 348 mutex_lock(&supp->mutex);
218 * We still have exclusive access to the data since that's how we 349 req = supp_pop_req(supp, num_params, param, &num_meta);
219 * left it when returning from optee_supp_read(). 350 mutex_unlock(&supp->mutex);
220 */
221
222 /* See comment on mutex in optee_supp_read() above */
223 mutex_lock(&supp->supp_mutex);
224
225 if (!supp->supp_next_send) {
226 /*
227 * Something strange is going on, supplicant shouldn't
228 * enter optee_supp_send() in this state
229 */
230 rc = -ENOENT;
231 goto out;
232 }
233 351
234 if (num_params != supp->num_params) { 352 if (IS_ERR(req)) {
235 /* 353 /* Something is wrong, let supplicant restart. */
236 * Something is wrong, let supplicant restart. Next call to 354 return PTR_ERR(req);
237 * optee_supp_recv() will give an error to the requesting
238 * thread and release it.
239 */
240 rc = -EINVAL;
241 goto out;
242 } 355 }
243 356
244 /* Update out and in/out parameters */ 357 /* Update out and in/out parameters */
245 for (n = 0; n < num_params; n++) { 358 for (n = 0; n < req->num_params; n++) {
246 struct tee_param *p = supp->param + n; 359 struct tee_param *p = req->param + n;
247 360
248 switch (p->attr) { 361 switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
249 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: 362 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
250 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: 363 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
251 p->u.value.a = param[n].u.value.a; 364 p->u.value.a = param[n + num_meta].u.value.a;
252 p->u.value.b = param[n].u.value.b; 365 p->u.value.b = param[n + num_meta].u.value.b;
253 p->u.value.c = param[n].u.value.c; 366 p->u.value.c = param[n + num_meta].u.value.c;
254 break; 367 break;
255 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: 368 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
256 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: 369 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
257 p->u.memref.size = param[n].u.memref.size; 370 p->u.memref.size = param[n + num_meta].u.memref.size;
258 break; 371 break;
259 default: 372 default:
260 break; 373 break;
261 } 374 }
262 } 375 }
263 supp->ret = ret; 376 req->ret = ret;
264
265 /* Allow optee_supp_recv() above to do its work */
266 supp->supp_next_send = false;
267 377
268 /* Let the requesting thread continue */ 378 /* Let the requesting thread continue */
269 complete(&supp->data_from_supp); 379 complete(&req->c);
270out: 380
271 mutex_unlock(&supp->supp_mutex); 381 return 0;
272 return rc;
273} 382}
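The supplicant rework above replaces the per-call globals and paired completions with per-request objects: a request is queued on supp->reqs, moved into an IDR (and marked busy) when the supplicant picks it up, and completed individually when the answer arrives, which is what allows several requests to be in flight at once. Below is a single-threaded userspace sketch of that ownership flow only; a fixed-size table stands in for the IDR, and locking and completions are deliberately left out, so this models how a request moves between the three steps, not the concurrency.

#include <stdio.h>

#define MAX_REQS 8

struct supp_req {
    struct supp_req *next;   /* queued list, like supp->reqs */
    int busy;                /* picked up by the supplicant */
    unsigned int func;
    unsigned int ret;
};

static struct supp_req *queue_head;
static struct supp_req *table[MAX_REQS];   /* stands in for supp->idr */

static void thrd_req(struct supp_req *req, unsigned int func)
{
    req->func = func;
    req->next = queue_head;                /* enqueue (LIFO for brevity) */
    queue_head = req;
}

static int supp_recv(unsigned int *func)   /* returns a request id, or -1 */
{
    struct supp_req *req = queue_head;
    int id;

    if (!req)
        return -1;
    queue_head = req->next;
    for (id = 0; id < MAX_REQS; id++) {
        if (!table[id]) {
            table[id] = req;               /* like idr_alloc() */
            req->busy = 1;
            *func = req->func;
            return id;
        }
    }
    return -1;
}

static void supp_send(int id, unsigned int ret)
{
    struct supp_req *req = table[id];

    table[id] = NULL;                      /* like idr_remove() */
    req->busy = 0;
    req->ret = ret;                        /* in the kernel, complete(&req->c) here */
}

int main(void)
{
    struct supp_req req = { 0 };
    unsigned int func;
    int id;

    thrd_req(&req, 0x1001);
    id = supp_recv(&func);
    printf("picked up func 0x%x as request %d\n", func, id);
    supp_send(id, 0);
    printf("request finished, ret=%u busy=%d\n", req.ret, req.busy);
    return 0;
}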
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 3d49ac2e3c84..6c4b200a4560 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -209,11 +209,11 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
209 return -EFAULT; 209 return -EFAULT;
210 210
211 /* All unused attribute bits has to be zero */ 211 /* All unused attribute bits has to be zero */
212 if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) 212 if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
213 return -EINVAL; 213 return -EINVAL;
214 214
215 params[n].attr = ip.attr; 215 params[n].attr = ip.attr;
216 switch (ip.attr) { 216 switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
217 case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: 217 case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
218 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: 218 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
219 break; 219 break;
@@ -278,18 +278,6 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
278 return 0; 278 return 0;
279} 279}
280 280
281static bool param_is_memref(struct tee_param *param)
282{
283 switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
284 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
285 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
286 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
287 return true;
288 default:
289 return false;
290 }
291}
292
293static int tee_ioctl_open_session(struct tee_context *ctx, 281static int tee_ioctl_open_session(struct tee_context *ctx,
294 struct tee_ioctl_buf_data __user *ubuf) 282 struct tee_ioctl_buf_data __user *ubuf)
295{ 283{
@@ -353,7 +341,7 @@ out:
353 if (params) { 341 if (params) {
354 /* Decrease ref count for all valid shared memory pointers */ 342 /* Decrease ref count for all valid shared memory pointers */
355 for (n = 0; n < arg.num_params; n++) 343 for (n = 0; n < arg.num_params; n++)
356 if (param_is_memref(params + n) && 344 if (tee_param_is_memref(params + n) &&
357 params[n].u.memref.shm) 345 params[n].u.memref.shm)
358 tee_shm_put(params[n].u.memref.shm); 346 tee_shm_put(params[n].u.memref.shm);
359 kfree(params); 347 kfree(params);
@@ -415,7 +403,7 @@ out:
415 if (params) { 403 if (params) {
416 /* Decrease ref count for all valid shared memory pointers */ 404 /* Decrease ref count for all valid shared memory pointers */
417 for (n = 0; n < arg.num_params; n++) 405 for (n = 0; n < arg.num_params; n++)
418 if (param_is_memref(params + n) && 406 if (tee_param_is_memref(params + n) &&
419 params[n].u.memref.shm) 407 params[n].u.memref.shm)
420 tee_shm_put(params[n].u.memref.shm); 408 tee_shm_put(params[n].u.memref.shm);
421 kfree(params); 409 kfree(params);
@@ -463,8 +451,8 @@ static int params_to_supp(struct tee_context *ctx,
463 struct tee_ioctl_param ip; 451 struct tee_ioctl_param ip;
464 struct tee_param *p = params + n; 452 struct tee_param *p = params + n;
465 453
466 ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK; 454 ip.attr = p->attr;
467 switch (p->attr) { 455 switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
468 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: 456 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
469 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: 457 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
470 ip.a = p->u.value.a; 458 ip.a = p->u.value.a;
@@ -528,6 +516,10 @@ static int tee_ioctl_supp_recv(struct tee_context *ctx,
528 if (!params) 516 if (!params)
529 return -ENOMEM; 517 return -ENOMEM;
530 518
519 rc = params_from_user(ctx, params, num_params, uarg->params);
520 if (rc)
521 goto out;
522
531 rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params); 523 rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
532 if (rc) 524 if (rc)
533 goto out; 525 goto out;
@@ -557,11 +549,11 @@ static int params_from_supp(struct tee_param *params, size_t num_params,
557 return -EFAULT; 549 return -EFAULT;
558 550
559 /* All unused attribute bits has to be zero */ 551 /* All unused attribute bits has to be zero */
560 if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK) 552 if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
561 return -EINVAL; 553 return -EINVAL;
562 554
563 p->attr = ip.attr; 555 p->attr = ip.attr;
564 switch (ip.attr) { 556 switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
565 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: 557 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
566 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: 558 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
567 /* Only out and in/out values can be updated */ 559 /* Only out and in/out values can be updated */
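tee_core now validates attributes against the full TEE_IOCTL_PARAM_ATTR_MASK but dispatches only on the type bits, so the META flag survives the round trip to the supplicant without changing which switch case runs. A small sketch of that masking convention; the bit values below are invented for the example and are not the real TEE_IOCTL definitions:

#include <stdio.h>

#define ATTR_TYPE_MASK   0x0000ffffu
#define ATTR_META        0x00010000u
#define ATTR_MASK        (ATTR_TYPE_MASK | ATTR_META)

#define TYPE_NONE        0u
#define TYPE_VALUE_INOUT 7u

static int handle_attr(unsigned int attr)
{
    if (attr & ~ATTR_MASK)               /* unknown bits: reject */
        return -1;

    switch (attr & ATTR_TYPE_MASK) {     /* dispatch ignores META */
    case TYPE_NONE:
        return 0;
    case TYPE_VALUE_INOUT:
        return 1;
    default:
        return -1;
    }
}

int main(void)
{
    printf("%d\n", handle_attr(TYPE_VALUE_INOUT));              /* 1 */
    printf("%d\n", handle_attr(TYPE_VALUE_INOUT | ATTR_META));  /* still 1 */
    printf("%d\n", handle_attr(0x20000000));                    /* -1, unknown bit */
    return 0;
}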
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 55b198ba629b..78e92d29f8d9 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
555 unsigned iad_num = 0; 555 unsigned iad_num = 0;
556 556
557 memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); 557 memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
558 nintf = nintf_orig = config->desc.bNumInterfaces;
559 config->desc.bNumInterfaces = 0; // Adjusted later
560
558 if (config->desc.bDescriptorType != USB_DT_CONFIG || 561 if (config->desc.bDescriptorType != USB_DT_CONFIG ||
559 config->desc.bLength < USB_DT_CONFIG_SIZE || 562 config->desc.bLength < USB_DT_CONFIG_SIZE ||
560 config->desc.bLength > size) { 563 config->desc.bLength > size) {
@@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
568 buffer += config->desc.bLength; 571 buffer += config->desc.bLength;
569 size -= config->desc.bLength; 572 size -= config->desc.bLength;
570 573
571 nintf = nintf_orig = config->desc.bNumInterfaces;
572 if (nintf > USB_MAXINTERFACES) { 574 if (nintf > USB_MAXINTERFACES) {
573 dev_warn(ddev, "config %d has too many interfaces: %d, " 575 dev_warn(ddev, "config %d has too many interfaces: %d, "
574 "using maximum allowed: %d\n", 576 "using maximum allowed: %d\n",
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index f66c94130cac..31749c79045f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -537,6 +537,7 @@ struct dwc2_core_params {
537 * 2 - Internal DMA 537 * 2 - Internal DMA
538 * @power_optimized Are power optimizations enabled? 538 * @power_optimized Are power optimizations enabled?
539 * @num_dev_ep Number of device endpoints available 539 * @num_dev_ep Number of device endpoints available
540 * @num_dev_in_eps Number of device IN endpoints available
540 * @num_dev_perio_in_ep Number of device periodic IN endpoints 541 * @num_dev_perio_in_ep Number of device periodic IN endpoints
541 * available 542 * available
542 * @dev_token_q_depth Device Mode IN Token Sequence Learning Queue 543 * @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
@@ -565,6 +566,7 @@ struct dwc2_core_params {
565 * 2 - 8 or 16 bits 566 * 2 - 8 or 16 bits
566 * @snpsid: Value from SNPSID register 567 * @snpsid: Value from SNPSID register
567 * @dev_ep_dirs: Direction of device endpoints (GHWCFG1) 568 * @dev_ep_dirs: Direction of device endpoints (GHWCFG1)
569 * @g_tx_fifo_size[] Power-on values of TxFIFO sizes
568 */ 570 */
569struct dwc2_hw_params { 571struct dwc2_hw_params {
570 unsigned op_mode:3; 572 unsigned op_mode:3;
@@ -586,12 +588,14 @@ struct dwc2_hw_params {
586 unsigned fs_phy_type:2; 588 unsigned fs_phy_type:2;
587 unsigned i2c_enable:1; 589 unsigned i2c_enable:1;
588 unsigned num_dev_ep:4; 590 unsigned num_dev_ep:4;
591 unsigned num_dev_in_eps : 4;
589 unsigned num_dev_perio_in_ep:4; 592 unsigned num_dev_perio_in_ep:4;
590 unsigned total_fifo_size:16; 593 unsigned total_fifo_size:16;
591 unsigned power_optimized:1; 594 unsigned power_optimized:1;
592 unsigned utmi_phy_data_width:2; 595 unsigned utmi_phy_data_width:2;
593 u32 snpsid; 596 u32 snpsid;
594 u32 dev_ep_dirs; 597 u32 dev_ep_dirs;
598 u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
595}; 599};
596 600
597/* Size of control and EP0 buffers */ 601/* Size of control and EP0 buffers */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 88529d092503..e4c3ce0de5de 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -195,55 +195,18 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
195{ 195{
196 if (hsotg->hw_params.en_multiple_tx_fifo) 196 if (hsotg->hw_params.en_multiple_tx_fifo)
197 /* In dedicated FIFO mode we need count of IN EPs */ 197 /* In dedicated FIFO mode we need count of IN EPs */
198 return (dwc2_readl(hsotg->regs + GHWCFG4) & 198 return hsotg->hw_params.num_dev_in_eps;
199 GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
200 else 199 else
201 /* In shared FIFO mode we need count of Periodic IN EPs */ 200 /* In shared FIFO mode we need count of Periodic IN EPs */
202 return hsotg->hw_params.num_dev_perio_in_ep; 201 return hsotg->hw_params.num_dev_perio_in_ep;
203} 202}
204 203
205/** 204/**
206 * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
207 */
208static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
209{
210 int val = 0;
211 int i;
212 u32 ep_dirs;
213
214 /*
215 * Don't need additional space for ep info control registers in
216 * slave mode.
217 */
218 if (!using_dma(hsotg)) {
219 dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
220 return 0;
221 }
222
223 /*
224 * Buffer DMA mode - 1 location per endpoit
225 * Descriptor DMA mode - 4 locations per endpoint
226 */
227 ep_dirs = hsotg->hw_params.dev_ep_dirs;
228
229 for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
230 val += ep_dirs & 3 ? 1 : 2;
231 ep_dirs >>= 2;
232 }
233
234 if (using_desc_dma(hsotg))
235 val = val * 4;
236
237 return val;
238}
239
240/**
241 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for 205 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
242 * device mode TX FIFOs 206 * device mode TX FIFOs
243 */ 207 */
244int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg) 208int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
245{ 209{
246 int ep_info_size;
247 int addr; 210 int addr;
248 int tx_addr_max; 211 int tx_addr_max;
249 u32 np_tx_fifo_size; 212 u32 np_tx_fifo_size;
@@ -252,8 +215,7 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
252 hsotg->params.g_np_tx_fifo_size); 215 hsotg->params.g_np_tx_fifo_size);
253 216
254 /* Get Endpoint Info Control block size in DWORDs. */ 217 /* Get Endpoint Info Control block size in DWORDs. */
255 ep_info_size = dwc2_hsotg_ep_info_size(hsotg); 218 tx_addr_max = hsotg->hw_params.total_fifo_size;
256 tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;
257 219
258 addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size; 220 addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
259 if (tx_addr_max <= addr) 221 if (tx_addr_max <= addr)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index ef73af6e03a9..03fd20f0b496 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -484,8 +484,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
484 } 484 }
485 485
486 for (fifo = 1; fifo <= fifo_count; fifo++) { 486 for (fifo = 1; fifo <= fifo_count; fifo++) {
487 dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) & 487 dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
488 FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
489 488
490 if (hsotg->params.g_tx_fifo_size[fifo] < min || 489 if (hsotg->params.g_tx_fifo_size[fifo] < min ||
491 hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) { 490 hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) {
@@ -609,6 +608,7 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
609 struct dwc2_hw_params *hw = &hsotg->hw_params; 608 struct dwc2_hw_params *hw = &hsotg->hw_params;
610 bool forced; 609 bool forced;
611 u32 gnptxfsiz; 610 u32 gnptxfsiz;
611 int fifo, fifo_count;
612 612
613 if (hsotg->dr_mode == USB_DR_MODE_HOST) 613 if (hsotg->dr_mode == USB_DR_MODE_HOST)
614 return; 614 return;
@@ -617,6 +617,14 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
617 617
618 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); 618 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
619 619
620 fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
621
622 for (fifo = 1; fifo <= fifo_count; fifo++) {
623 hw->g_tx_fifo_size[fifo] =
624 (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
625 FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
626 }
627
620 if (forced) 628 if (forced)
621 dwc2_clear_force_mode(hsotg); 629 dwc2_clear_force_mode(hsotg);
622 630
@@ -661,14 +669,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
661 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); 669 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
662 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); 670 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
663 671
664 /*
665 * Host specific hardware parameters. Reading these parameters
666 * requires the controller to be in host mode. The mode will
667 * be forced, if necessary, to read these values.
668 */
669 dwc2_get_host_hwparams(hsotg);
670 dwc2_get_dev_hwparams(hsotg);
671
672 /* hwcfg1 */ 672 /* hwcfg1 */
673 hw->dev_ep_dirs = hwcfg1; 673 hw->dev_ep_dirs = hwcfg1;
674 674
@@ -711,6 +711,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
711 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); 711 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
712 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> 712 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
713 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; 713 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
714 hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
715 GHWCFG4_NUM_IN_EPS_SHIFT;
714 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); 716 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
715 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); 717 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
716 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> 718 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
@@ -719,6 +721,13 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
719 /* fifo sizes */ 721 /* fifo sizes */
720 hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> 722 hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
721 GRXFSIZ_DEPTH_SHIFT; 723 GRXFSIZ_DEPTH_SHIFT;
724 /*
725 * Host specific hardware parameters. Reading these parameters
726 * requires the controller to be in host mode. The mode will
727 * be forced, if necessary, to read these values.
728 */
729 dwc2_get_host_hwparams(hsotg);
730 dwc2_get_dev_hwparams(hsotg);
722 731
723 return 0; 732 return 0;
724} 733}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index c4a4d7bd2766..7ae0eefc7cc7 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -51,8 +51,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
51 51
52 clk = of_clk_get(np, i); 52 clk = of_clk_get(np, i);
53 if (IS_ERR(clk)) { 53 if (IS_ERR(clk)) {
54 while (--i >= 0) 54 while (--i >= 0) {
55 clk_disable_unprepare(simple->clks[i]);
55 clk_put(simple->clks[i]); 56 clk_put(simple->clks[i]);
57 }
56 return PTR_ERR(clk); 58 return PTR_ERR(clk);
57 } 59 }
58 60
@@ -203,6 +205,7 @@ static struct platform_driver dwc3_of_simple_driver = {
203 .driver = { 205 .driver = {
204 .name = "dwc3-of-simple", 206 .name = "dwc3-of-simple",
205 .of_match_table = of_dwc3_simple_match, 207 .of_match_table = of_dwc3_simple_match,
208 .pm = &dwc3_of_simple_dev_pm_ops,
206 }, 209 },
207}; 210};
208 211
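The dwc3-of-simple clock fix completes the error unwind: when of_clk_get() fails for clock i, every clock acquired before it must be both disabled/unprepared and put, walking backwards. A userspace analogue of the unwind-on-failure pattern, with resources simulated by malloc() and the failure injected at index 2:

#include <stdio.h>
#include <stdlib.h>

#define NCLKS 4

static void *get_clk(int i)      { return i == 2 ? NULL : malloc(16); }  /* index 2 fails */
static void enable_clk(void *c)  { (void)c; }   /* placeholder for prepare/enable */
static void disable_clk(void *c) { (void)c; }   /* placeholder for disable/unprepare */
static void put_clk(void *c)     { free(c); }

static int init_clks(void *clks[], int count)
{
    int i;

    for (i = 0; i < count; i++) {
        clks[i] = get_clk(i);
        if (!clks[i]) {
            while (--i >= 0) {          /* unwind everything acquired so far */
                disable_clk(clks[i]);   /* the step the patch adds */
                put_clk(clks[i]);
            }
            return -1;
        }
        enable_clk(clks[i]);
    }
    return 0;
}

int main(void)
{
    void *clks[NCLKS];

    printf("init_clks: %d\n", init_clks(clks, NCLKS));   /* fails at i == 2, unwinds 0..1 */
    return 0;
}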
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 981fd986cf82..639dd1b163a0 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -259,7 +259,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
259{ 259{
260 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 260 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
261 struct dwc3 *dwc = dep->dwc; 261 struct dwc3 *dwc = dep->dwc;
262 u32 timeout = 500; 262 u32 timeout = 1000;
263 u32 reg; 263 u32 reg;
264 264
265 int cmd_status = 0; 265 int cmd_status = 0;
@@ -912,7 +912,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
912 */ 912 */
913 if (speed == USB_SPEED_HIGH) { 913 if (speed == USB_SPEED_HIGH) {
914 struct usb_ep *ep = &dep->endpoint; 914 struct usb_ep *ep = &dep->endpoint;
915 unsigned int mult = ep->mult - 1; 915 unsigned int mult = 2;
916 unsigned int maxp = usb_endpoint_maxp(ep->desc); 916 unsigned int maxp = usb_endpoint_maxp(ep->desc);
917 917
918 if (length <= (2 * maxp)) 918 if (length <= (2 * maxp))
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 0a19a76645ad..31cce7805eb2 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -508,8 +508,8 @@ choice
508 controller, and the relevant drivers for each function declared 508 controller, and the relevant drivers for each function declared
509 by the device. 509 by the device.
510 510
511endchoice
512
513source "drivers/usb/gadget/legacy/Kconfig" 511source "drivers/usb/gadget/legacy/Kconfig"
514 512
513endchoice
514
515endif # USB_GADGET 515endif # USB_GADGET
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 9570bbeced4f..784bf86dad4f 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -13,14 +13,6 @@
13# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG). 13# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
14# 14#
15 15
16menuconfig USB_GADGET_LEGACY
17 bool "Legacy USB Gadget Support"
18 help
19 Legacy USB gadgets are USB gadgets that do not use the USB gadget
20 configfs interface.
21
22if USB_GADGET_LEGACY
23
24config USB_ZERO 16config USB_ZERO
25 tristate "Gadget Zero (DEVELOPMENT)" 17 tristate "Gadget Zero (DEVELOPMENT)"
26 select USB_LIBCOMPOSITE 18 select USB_LIBCOMPOSITE
@@ -487,7 +479,7 @@ endif
487# or video class gadget drivers), or specific hardware, here. 479# or video class gadget drivers), or specific hardware, here.
488config USB_G_WEBCAM 480config USB_G_WEBCAM
489 tristate "USB Webcam Gadget" 481 tristate "USB Webcam Gadget"
490 depends on VIDEO_DEV 482 depends on VIDEO_V4L2
491 select USB_LIBCOMPOSITE 483 select USB_LIBCOMPOSITE
492 select VIDEOBUF2_VMALLOC 484 select VIDEOBUF2_VMALLOC
493 select USB_F_UVC 485 select USB_F_UVC
@@ -498,5 +490,3 @@ config USB_G_WEBCAM
498 490
499 Say "y" to link the driver statically, or "m" to build a 491 Say "y" to link the driver statically, or "m" to build a
500 dynamically linked module called "g_webcam". 492 dynamically linked module called "g_webcam".
501
502endif
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 15f7d422885f..3a29b32a3bd0 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -971,10 +971,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
971 return 0; 971 return 0;
972 } 972 }
973 973
974 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); 974 dev = kzalloc(sizeof(*dev), flags);
975 if (!xhci->devs[slot_id]) 975 if (!dev)
976 return 0; 976 return 0;
977 dev = xhci->devs[slot_id];
978 977
979 /* Allocate the (output) device context that will be used in the HC. */ 978 /* Allocate the (output) device context that will be used in the HC. */
980 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); 979 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1015,9 +1014,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
1015 1014
1016 trace_xhci_alloc_virt_device(dev); 1015 trace_xhci_alloc_virt_device(dev);
1017 1016
1017 xhci->devs[slot_id] = dev;
1018
1018 return 1; 1019 return 1;
1019fail: 1020fail:
1020 xhci_free_virt_device(xhci, slot_id); 1021
1022 if (dev->in_ctx)
1023 xhci_free_container_ctx(xhci, dev->in_ctx);
1024 if (dev->out_ctx)
1025 xhci_free_container_ctx(xhci, dev->out_ctx);
1026 kfree(dev);
1027
1021 return 0; 1028 return 0;
1022} 1029}
1023 1030
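xhci_alloc_virt_device() no longer stores the new device in xhci->devs[slot_id] until it is fully built, and on failure it frees only what it allocated itself rather than calling xhci_free_virt_device() on a half-initialized slot. An illustrative userspace sketch of the publish-last pattern; member names and sizes are invented:

#include <stdio.h>
#include <stdlib.h>

struct virt_dev {
    void *in_ctx;
    void *out_ctx;
};

static struct virt_dev *devs[8];    /* shared table, like xhci->devs[] */

static int alloc_virt_dev(int slot, int fail_in_ctx)
{
    struct virt_dev *dev = calloc(1, sizeof(*dev));

    if (!dev)
        return 0;

    dev->out_ctx = malloc(64);
    if (!dev->out_ctx)
        goto fail;

    dev->in_ctx = fail_in_ctx ? NULL : malloc(64);   /* simulated failure point */
    if (!dev->in_ctx)
        goto fail;

    devs[slot] = dev;    /* publish only after full initialization */
    return 1;

fail:
    /* Clean up locally; the shared table was never touched. */
    free(dev->out_ctx);
    free(dev);
    return 0;
}

int main(void)
{
    printf("failing alloc: %d, slot %p\n", alloc_virt_dev(3, 1), (void *)devs[3]);
    printf("good alloc:    %d, slot %p\n", alloc_virt_dev(3, 0), (void *)devs[3]);
    free(devs[3]->in_ctx);
    free(devs[3]->out_ctx);
    free(devs[3]);
    return 0;
}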
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6eb87c6e4d24..c5cbc685c691 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3112,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3112{ 3112{
3113 u32 maxp, total_packet_count; 3113 u32 maxp, total_packet_count;
3114 3114
3115 /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ 3115 /* MTK xHCI 0.96 contains some features from 1.0 */
3116 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) 3116 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3117 return ((td_total_len - transferred) >> 10); 3117 return ((td_total_len - transferred) >> 10);
3118 3118
@@ -3121,8 +3121,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3121 trb_buff_len == td_total_len) 3121 trb_buff_len == td_total_len)
3122 return 0; 3122 return 0;
3123 3123
3124 /* for MTK xHCI, TD size doesn't include this TRB */ 3124 /* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */
3125 if (xhci->quirks & XHCI_MTK_HOST) 3125 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3126 trb_buff_len = 0; 3126 trb_buff_len = 0;
3127 3127
3128 maxp = usb_endpoint_maxp(&urb->ep->desc); 3128 maxp = usb_endpoint_maxp(&urb->ep->desc);
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 0397606a211b..6c036de63272 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -284,7 +284,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
284 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; 284 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
285 portstate(musb->port1_status |= USB_PORT_STAT_POWER); 285 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
286 del_timer(&musb->dev_timer); 286 del_timer(&musb->dev_timer);
287 } else { 287 } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
288 /*
289 * When babble condition happens, drvvbus interrupt
290 * is also generated. Ignore this drvvbus interrupt
291 * and let the babble interrupt handler recover the
292 * controller; otherwise, the host-mode flag is lost
293 * due to the MUSB_DEV_MODE() call below and babble
294 * recovery logic will not be called.
295 */
288 musb->is_active = 0; 296 musb->is_active = 0;
289 MUSB_DEV_MODE(musb); 297 MUSB_DEV_MODE(musb);
290 otg->default_a = 0; 298 otg->default_a = 0;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2968046e7c05..f72d045ee9ef 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,6 +2100,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
2100 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2100 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2101 US_FL_BROKEN_FUA ), 2101 US_FL_BROKEN_FUA ),
2102 2102
2103/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
2104UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
2105 "JMicron",
2106 "JMS567",
2107 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2108 US_FL_BROKEN_FUA),
2109
2103/* 2110/*
2104 * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br> 2111 * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
2105 * JMicron responds to USN and several other SCSI ioctls with a 2112 * JMicron responds to USN and several other SCSI ioctls with a
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d520374a824e..e6127fb21c12 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -129,6 +129,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
129 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 129 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
130 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES), 130 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
131 131
132/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
133UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
134 "JMicron",
135 "JMS567",
136 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
137 US_FL_BROKEN_FUA),
138
132/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 139/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
133UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, 140UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
134 "VIA", 141 "VIA",
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 536e037f541f..493ac2928391 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -322,23 +322,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
322 return priv; 322 return priv;
323} 323}
324 324
325static int get_pipe(struct stub_device *sdev, int epnum, int dir) 325static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
326{ 326{
327 struct usb_device *udev = sdev->udev; 327 struct usb_device *udev = sdev->udev;
328 struct usb_host_endpoint *ep; 328 struct usb_host_endpoint *ep;
329 struct usb_endpoint_descriptor *epd = NULL; 329 struct usb_endpoint_descriptor *epd = NULL;
330 int epnum = pdu->base.ep;
331 int dir = pdu->base.direction;
332
333 if (epnum < 0 || epnum > 15)
334 goto err_ret;
330 335
331 if (dir == USBIP_DIR_IN) 336 if (dir == USBIP_DIR_IN)
332 ep = udev->ep_in[epnum & 0x7f]; 337 ep = udev->ep_in[epnum & 0x7f];
333 else 338 else
334 ep = udev->ep_out[epnum & 0x7f]; 339 ep = udev->ep_out[epnum & 0x7f];
335 if (!ep) { 340 if (!ep)
336 dev_err(&sdev->udev->dev, "no such endpoint?, %d\n", 341 goto err_ret;
337 epnum);
338 BUG();
339 }
340 342
341 epd = &ep->desc; 343 epd = &ep->desc;
344
345 /* validate transfer_buffer_length */
346 if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
347 dev_err(&sdev->udev->dev,
348 "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
349 pdu->u.cmd_submit.transfer_buffer_length);
350 return -1;
351 }
352
342 if (usb_endpoint_xfer_control(epd)) { 353 if (usb_endpoint_xfer_control(epd)) {
343 if (dir == USBIP_DIR_OUT) 354 if (dir == USBIP_DIR_OUT)
344 return usb_sndctrlpipe(udev, epnum); 355 return usb_sndctrlpipe(udev, epnum);
@@ -361,15 +372,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
361 } 372 }
362 373
363 if (usb_endpoint_xfer_isoc(epd)) { 374 if (usb_endpoint_xfer_isoc(epd)) {
375 /* validate packet size and number of packets */
376 unsigned int maxp, packets, bytes;
377
378 maxp = usb_endpoint_maxp(epd);
379 maxp *= usb_endpoint_maxp_mult(epd);
380 bytes = pdu->u.cmd_submit.transfer_buffer_length;
381 packets = DIV_ROUND_UP(bytes, maxp);
382
383 if (pdu->u.cmd_submit.number_of_packets < 0 ||
384 pdu->u.cmd_submit.number_of_packets > packets) {
385 dev_err(&sdev->udev->dev,
386 "CMD_SUBMIT: isoc invalid num packets %d\n",
387 pdu->u.cmd_submit.number_of_packets);
388 return -1;
389 }
364 if (dir == USBIP_DIR_OUT) 390 if (dir == USBIP_DIR_OUT)
365 return usb_sndisocpipe(udev, epnum); 391 return usb_sndisocpipe(udev, epnum);
366 else 392 else
367 return usb_rcvisocpipe(udev, epnum); 393 return usb_rcvisocpipe(udev, epnum);
368 } 394 }
369 395
396err_ret:
370 /* NOT REACHED */ 397 /* NOT REACHED */
371 dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum); 398 dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
372 return 0; 399 return -1;
373} 400}
374 401
375static void masking_bogus_flags(struct urb *urb) 402static void masking_bogus_flags(struct urb *urb)
@@ -433,7 +460,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
 	struct stub_priv *priv;
 	struct usbip_device *ud = &sdev->ud;
 	struct usb_device *udev = sdev->udev;
-	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
+	int pipe = get_pipe(sdev, pdu);
+
+	if (pipe == -1)
+		return;

 	priv = stub_priv_alloc(sdev, pdu);
 	if (!priv)
@@ -452,7 +482,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
 	}

 	/* allocate urb transfer buffer, if needed */
-	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
+	if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
+	    pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
 		priv->urb->transfer_buffer =
 			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
 				GFP_KERNEL);
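
The isochronous check added above bounds the client-supplied number_of_packets by the number of max-size packets that can fit in transfer_buffer_length, so a hostile or confused client cannot make the stub build an oversized URB. Below is a minimal, self-contained sketch of that bound; the values and the local DIV_ROUND_UP definition are illustrative only, not driver code.

/* sketch of the isoc packet-count bound enforced in get_pipe() */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int isoc_packets_valid(int number_of_packets,
			      unsigned int transfer_buffer_length,
			      unsigned int maxp)
{
	/* largest legal count: ceil(buffer length / max packet size) */
	unsigned int packets = DIV_ROUND_UP(transfer_buffer_length, maxp);

	return number_of_packets >= 0 &&
	       (unsigned int)number_of_packets <= packets;
}

int main(void)
{
	/* 3072-byte buffer with 1024-byte packets allows at most 3 packets */
	printf("%d\n", isoc_packets_valid(3, 3072, 1024));  /* 1: accepted */
	printf("%d\n", isoc_packets_valid(64, 3072, 1024)); /* 0: rejected */
	return 0;
}
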
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index b18bce96c212..53172b1f6257 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -167,6 +167,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
 	memset(&pdu_header, 0, sizeof(pdu_header));
 	memset(&msg, 0, sizeof(msg));

+	if (urb->actual_length > 0 && !urb->transfer_buffer) {
+		dev_err(&sdev->udev->dev,
+			"urb: actual_length %d transfer_buffer null\n",
+			urb->actual_length);
+		return -1;
+	}
+
 	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
 		iovnum = 2 + urb->number_of_packets;
 	else
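
The check added above refuses to build a reply whose actual_length claims data while transfer_buffer is NULL, which would otherwise be dereferenced when the iovec is assembled. A stand-alone sketch of the same consistency test, using hypothetical types rather than the driver's URB:

/* sketch of the actual_length / transfer_buffer consistency check */
#include <stdio.h>
#include <stddef.h>

struct fake_urb {
	int actual_length;
	void *transfer_buffer;
};

static int reply_is_consistent(const struct fake_urb *urb)
{
	/* data claimed but no buffer backing it: refuse to send */
	return !(urb->actual_length > 0 && urb->transfer_buffer == NULL);
}

int main(void)
{
	char buf[512];
	struct fake_urb bad = { .actual_length = 512, .transfer_buffer = NULL };
	struct fake_urb good = { .actual_length = 512, .transfer_buffer = buf };

	printf("bad: %d, good: %d\n",
	       reply_is_consistent(&bad), reply_is_consistent(&good));
	return 0;
}
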
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index e5de35c8c505..473fb8a87289 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -256,6 +256,7 @@ struct usbip_device {
 	/* lock for status */
 	spinlock_t lock;

+	int sockfd;
 	struct socket *tcp_socket;

 	struct task_struct *tcp_rx;
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index e78f7472cac4..091f76b7196d 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -17,15 +17,20 @@

 /*
  * output example:
- * hub port sta spd dev      socket           local_busid
- * hs  0000 004 000 00000000 c5a7bb80 1-2.3
+ * hub port sta spd dev      sockfd local_busid
+ * hs  0000 004 000 00000000 3 1-2.3
  * ................................................
- * ss  0008 004 000 00000000 d8cee980 2-3.4
+ * ss  0008 004 000 00000000 4 2-3.4
  * ................................................
  *
- * IP address can be retrieved from a socket pointer address by looking
- * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
- * port number and its peer IP address.
+ * Output includes socket fd instead of socket pointer address to avoid
+ * leaking kernel memory address in:
+ *	/sys/devices/platform/vhci_hcd.0/status and in debug output.
+ * The socket pointer address is not used at the moment and it was made
+ * visible as a convenient way to find IP address from socket pointer
+ * address by looking up /proc/net/{tcp,tcp6}. As this opens a security
+ * hole, the change is made to use sockfd instead.
+ *
  */
 static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
 {
@@ -39,8 +44,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
 	if (vdev->ud.status == VDEV_ST_USED) {
 		*out += sprintf(*out, "%03u %08x ",
 				vdev->speed, vdev->devid);
-		*out += sprintf(*out, "%16p %s",
-				vdev->ud.tcp_socket,
+		*out += sprintf(*out, "%u %s",
+				vdev->ud.sockfd,
 				dev_name(&vdev->udev->dev));

 	} else {
@@ -160,7 +165,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
 	char *s = out;

 	/*
-	 * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2.
+	 * Half the ports are for SPEED_HIGH and half for SPEED_SUPER,
+	 * thus the * 2.
 	 */
 	out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
 	return out - s;
@@ -366,6 +372,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,

 	vdev->devid = devid;
 	vdev->speed = speed;
+	vdev->ud.sockfd = sockfd;
 	vdev->ud.tcp_socket = socket;
 	vdev->ud.status = VDEV_ST_NOTASSIGNED;

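
The sysfs status output now carries the integer sockfd instead of a %16p-formatted kernel socket pointer, which removes the kernel-address leak while still letting userspace identify the connection. A minimal sketch contrasting the two format strings; the struct and values below are hypothetical, not the driver's vhci_device:

/* sketch: printing a file descriptor instead of a kernel pointer */
#include <stdio.h>

struct fake_ud {
	int sockfd;
	void *tcp_socket;	/* a kernel pointer in the real driver */
};

int main(void)
{
	struct fake_ud ud = { .sockfd = 3, .tcp_socket = (void *)0xd8cee980UL };
	char line[64];

	/* old format: exposes an address-like value to userspace */
	snprintf(line, sizeof(line), "%16p", ud.tcp_socket);
	printf("old: %s\n", line);

	/* new format: only the socket's file descriptor number */
	snprintf(line, sizeof(line), "%u", (unsigned int)ud.sockfd);
	printf("new: %s\n", line);
	return 0;
}
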
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index a9192fe4f345..c92131edfaba 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -522,10 +522,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 		return -EBUSY;

 	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
-	if (!vm_dev) {
-		rc = -ENOMEM;
-		goto free_mem;
-	}
+	if (!vm_dev)
+		return -ENOMEM;

 	vm_dev->vdev.dev.parent = &pdev->dev;
 	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
@@ -535,17 +533,14 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	spin_lock_init(&vm_dev->lock);

 	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (vm_dev->base == NULL) {
-		rc = -EFAULT;
-		goto free_vmdev;
-	}
+	if (vm_dev->base == NULL)
+		return -EFAULT;

 	/* Check magic value */
 	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
 	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
 		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
-		rc = -ENODEV;
-		goto unmap;
+		return -ENODEV;
 	}

 	/* Check device version */
@@ -553,8 +548,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	if (vm_dev->version < 1 || vm_dev->version > 2) {
 		dev_err(&pdev->dev, "Version %ld not supported!\n",
 			vm_dev->version);
-		rc = -ENXIO;
-		goto unmap;
+		return -ENXIO;
 	}

 	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -563,8 +557,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
 		 * with no function. End probing now with no error reported.
 		 */
-		rc = -ENODEV;
-		goto unmap;
+		return -ENODEV;
 	}
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

@@ -590,33 +583,15 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, vm_dev);

 	rc = register_virtio_device(&vm_dev->vdev);
-	if (rc) {
-		iounmap(vm_dev->base);
-		devm_release_mem_region(&pdev->dev, mem->start,
-				resource_size(mem));
+	if (rc)
 		put_device(&vm_dev->vdev.dev);
-	}
-	return rc;
-unmap:
-	iounmap(vm_dev->base);
-free_mem:
-	devm_release_mem_region(&pdev->dev, mem->start,
-			resource_size(mem));
-free_vmdev:
-	devm_kfree(&pdev->dev, vm_dev);
+
 	return rc;
 }

 static int virtio_mmio_remove(struct platform_device *pdev)
 {
 	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
-	struct resource *mem;
-
-	iounmap(vm_dev->base);
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (mem)
-		devm_release_mem_region(&pdev->dev, mem->start,
-				resource_size(mem));
 	unregister_virtio_device(&vm_dev->vdev);

 	return 0;
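
The manual iounmap/devm_release_mem_region calls and goto labels could be dropped above because devm_kzalloc and devm_ioremap are device-managed: their resources are released automatically when probe fails or the device is unbound, so each error path can simply return. A userspace sketch of that "managed resource" pattern follows; it illustrates the idea only and is not the kernel devm API:

/* sketch: allocations tracked by the owning device, freed in one place */
#include <stdio.h>
#include <stdlib.h>

struct managed {
	void *ptr;
	struct managed *next;
};

struct device {
	struct managed *res;	/* list of managed allocations */
};

static void *dev_alloc(struct device *dev, size_t size)
{
	struct managed *m = malloc(sizeof(*m));

	if (!m)
		return NULL;
	m->ptr = calloc(1, size);
	if (!m->ptr) {
		free(m);
		return NULL;
	}
	m->next = dev->res;
	dev->res = m;
	return m->ptr;
}

static void dev_release_all(struct device *dev)
{
	while (dev->res) {
		struct managed *m = dev->res;

		dev->res = m->next;
		free(m->ptr);
		free(m);
	}
}

static int probe(struct device *dev)
{
	void *a = dev_alloc(dev, 64);
	void *b;

	if (!a)
		return -1;	/* nothing to unwind by hand */
	b = dev_alloc(dev, 128);
	if (!b)
		return -1;	/* 'a' is still tracked by the device */
	return 0;
}

int main(void)
{
	struct device dev = { .res = NULL };
	int rc = probe(&dev);

	dev_release_all(&dev);	/* single cleanup point, as with devm_* */
	printf("probe %s\n", rc ? "failed" : "succeeded");
	return 0;
}
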
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d8dd54678ab7..e5d0c28372ea 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -269,7 +269,7 @@ config XEN_ACPI_HOTPLUG_CPU

 config XEN_ACPI_PROCESSOR
 	tristate "Xen ACPI processor"
-	depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
+	depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
 	default m
 	help
 	  This ACPI processor uploads Power Management information to the Xen