Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 4
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 14
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 76
-rw-r--r--  drivers/acpi/device_pm.c | 9
-rw-r--r--  drivers/amba/bus.c | 4
-rw-r--r--  drivers/ata/ahci.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 9
-rw-r--r--  drivers/base/base.h | 3
-rw-r--r--  drivers/base/core.c | 20
-rw-r--r--  drivers/base/cpu.c | 8
-rw-r--r--  drivers/base/dd.c | 7
-rw-r--r--  drivers/base/node.c | 5
-rw-r--r--  drivers/base/platform.c | 17
-rw-r--r--  drivers/base/power/common.c | 10
-rw-r--r--  drivers/base/power/domain.c | 212
-rw-r--r--  drivers/base/power/main.c | 44
-rw-r--r--  drivers/base/power/power.h | 30
-rw-r--r--  drivers/base/power/runtime.c | 29
-rw-r--r--  drivers/base/power/wakeup.c | 78
-rw-r--r--  drivers/bcma/driver_mips.c | 2
-rw-r--r--  drivers/block/loop.c | 1
-rw-r--r--  drivers/clk/Kconfig | 6
-rw-r--r--  drivers/clk/imx/clk-imx6ul.c | 2
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 4
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 18
-rw-r--r--  drivers/firmware/arm_scmi/driver.c | 1
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 10
-rw-r--r--  drivers/firmware/qcom_scm-32.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 2
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/drm_dumb_buffers.c | 7
-rw-r--r--  drivers/gpu/drm/drm_file.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.c | 3
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 48
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.h | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/k10temp.c | 51
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-pmcmsp.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-viperboard.c | 2
-rw-r--r--  drivers/i2c/i2c-core-acpi.c | 13
-rw-r--r--  drivers/i2c/i2c-core-base.c | 2
-rw-r--r--  drivers/infiniband/core/umem.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 32
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 10
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hw.c | 4
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 7
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 13
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.h | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 18
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 60
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 10
-rw-r--r--  drivers/infiniband/ulp/srpt/Kconfig | 2
-rw-r--r--  drivers/input/mouse/elan_i2c_smbus.c | 22
-rw-r--r--  drivers/input/mouse/synaptics.c | 6
-rw-r--r--  drivers/isdn/hardware/eicon/diva.c | 22
-rw-r--r--  drivers/isdn/hardware/eicon/diva.h | 5
-rw-r--r--  drivers/isdn/hardware/eicon/divasmain.c | 18
-rw-r--r--  drivers/md/bcache/debug.c | 4
-rw-r--r--  drivers/mfd/cros_ec_spi.c | 24
-rw-r--r--  drivers/misc/cxl/cxl.h | 1
-rw-r--r--  drivers/misc/cxl/pci.c | 12
-rw-r--r--  drivers/misc/cxl/sysfs.c | 10
-rw-r--r--  drivers/misc/eeprom/at24.c | 2
-rw-r--r--  drivers/mmc/core/block.c | 2
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-iproc.c | 33
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c | 8
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 36
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 104
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 4
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 28
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 88
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 61
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 4
-rw-r--r--  drivers/net/phy/bcm-cygnus.c | 6
-rw-r--r--  drivers/net/phy/bcm-phy-lib.c | 2
-rw-r--r--  drivers/net/phy/bcm-phy-lib.h | 7
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 4
-rw-r--r--  drivers/net/phy/micrel.c | 31
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 27
-rw-r--r--  drivers/net/tun.c | 46
-rw-r--r--  drivers/net/virtio_net.c | 21
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 72
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 8
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 4
-rw-r--r--  drivers/nvme/host/Kconfig | 2
-rw-r--r--  drivers/nvme/host/core.c | 2
-rw-r--r--  drivers/nvme/target/Kconfig | 2
-rw-r--r--  drivers/opp/core.c | 267
-rw-r--r--  drivers/opp/debugfs.c | 15
-rw-r--r--  drivers/opp/of.c | 184
-rw-r--r--  drivers/opp/opp.h | 6
-rw-r--r--  drivers/parisc/ccio-dma.c | 2
-rw-r--r--  drivers/platform/chrome/cros_ec_proto.c | 2
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 23
-rw-r--r--  drivers/reset/reset-uniphier.c | 6
-rw-r--r--  drivers/s390/block/dasd.c | 7
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 12
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 13
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 23
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 5
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 14
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 8
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/sr_ioctl.c | 10
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 2
-rw-r--r--  drivers/soc/tegra/pmc.c | 20
-rw-r--r--  drivers/soundwire/bus_type.c | 15
-rw-r--r--  drivers/spi/spi-bcm-qspi.c | 28
-rw-r--r--  drivers/spi/spi-bcm2835aux.c | 5
-rw-r--r--  drivers/spi/spi-cadence.c | 8
-rw-r--r--  drivers/spi/spi-imx.c | 2
-rw-r--r--  drivers/spi/spi-pxa2xx.h | 2
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 1
-rw-r--r--  drivers/spi/spi.c | 11
-rw-r--r--  drivers/ssb/Kconfig | 4
-rw-r--r--  drivers/staging/lustre/lnet/Kconfig | 2
-rw-r--r--  drivers/target/target_core_user.c | 2
-rw-r--r--  drivers/tee/tee_core.c | 11
-rw-r--r--  drivers/tee/tee_shm.c | 5
-rw-r--r--  drivers/usb/host/xhci-hub.c | 2
-rw-r--r--  drivers/usb/musb/musb_host.c | 5
-rw-r--r--  drivers/usb/musb/musb_host.h | 7
-rw-r--r--  drivers/usb/musb/musb_virthub.c | 25
-rw-r--r--  drivers/usb/usbip/stub.h | 2
-rw-r--r--  drivers/usb/usbip/stub_dev.c | 43
-rw-r--r--  drivers/usb/usbip/stub_main.c | 105
-rw-r--r--  drivers/vhost/vhost.c | 3
-rw-r--r--  drivers/xen/swiotlb-xen.c | 2
164 files changed, 1776 insertions, 1065 deletions
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 514aaf948ea9..3825df923480 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -56,6 +56,10 @@ acpi_status acpi_ns_initialize_objects(void);
 
 acpi_status acpi_ns_initialize_devices(u32 flags);
 
+acpi_status
+acpi_ns_init_one_package(acpi_handle obj_handle,
+			 u32 level, void *context, void **return_value);
+
 /*
  * nsload - Namespace loading
  */
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 99d92cb32803..f85c6f3271f6 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -174,6 +174,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
 		return_ACPI_STATUS(status);
 	}
 
+	/* Complete the initialization/resolution of package objects */
+
+	status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT,
+					ACPI_UINT32_MAX, 0,
+					acpi_ns_init_one_package, NULL, NULL,
+					NULL);
+
 	/* Parameter Data (optional) */
 
 	if (parameter_node) {
@@ -430,6 +437,13 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
 		return_ACPI_STATUS(status);
 	}
 
+	/* Complete the initialization/resolution of package objects */
+
+	status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT,
+					ACPI_UINT32_MAX, 0,
+					acpi_ns_init_one_package, NULL, NULL,
+					NULL);
+
 	/* Store the ddb_handle into the Target operand */
 
 	status = acpi_ex_store(ddb_handle, target, walk_state);
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 77f2b5f4948a..d77257d1c827 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -242,6 +242,58 @@ error_exit:
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_ns_init_one_package
+ *
+ * PARAMETERS:  obj_handle      - Node
+ *              level           - Current nesting level
+ *              context         - Not used
+ *              return_value    - Not used
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every package
+ *              within the namespace. Used during dynamic load of an SSDT.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_init_one_package(acpi_handle obj_handle,
+			 u32 level, void *context, void **return_value)
+{
+	acpi_status status;
+	union acpi_operand_object *obj_desc;
+	struct acpi_namespace_node *node =
+	    (struct acpi_namespace_node *)obj_handle;
+
+	obj_desc = acpi_ns_get_attached_object(node);
+	if (!obj_desc) {
+		return (AE_OK);
+	}
+
+	/* Exit if package is already initialized */
+
+	if (obj_desc->package.flags & AOPOBJ_DATA_VALID) {
+		return (AE_OK);
+	}
+
+	status = acpi_ds_get_package_arguments(obj_desc);
+	if (ACPI_FAILURE(status)) {
+		return (AE_OK);
+	}
+
+	status =
+	    acpi_ut_walk_package_tree(obj_desc, NULL,
+				      acpi_ds_init_package_element, NULL);
+	if (ACPI_FAILURE(status)) {
+		return (AE_OK);
+	}
+
+	obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_ns_init_one_object
  *
  * PARAMETERS:  obj_handle      - Node
@@ -360,27 +412,11 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
 
 	case ACPI_TYPE_PACKAGE:
 
-		info->package_init++;
-		status = acpi_ds_get_package_arguments(obj_desc);
-		if (ACPI_FAILURE(status)) {
-			break;
-		}
-
-		ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE,
-				      "%s: Completing resolution of Package elements\n",
-				      ACPI_GET_FUNCTION_NAME));
+		/* Complete the initialization/resolution of the package object */
 
-		/*
-		 * Resolve all named references in package objects (and all
-		 * sub-packages). This action has been deferred until the entire
-		 * namespace has been loaded, in order to support external and
-		 * forward references from individual package elements (05/2017).
-		 */
-		status = acpi_ut_walk_package_tree(obj_desc, NULL,
-						   acpi_ds_init_package_element,
-						   NULL);
-
-		obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+		info->package_init++;
+		status =
+		    acpi_ns_init_one_package(obj_handle, level, NULL, NULL);
 		break;
 
 	default:
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 3d96e4da2d98..a7c2673ffd36 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1257,10 +1257,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
 	struct acpi_device *adev = ACPI_COMPANION(dev);
 
 	if (!adev)
-		return -ENODEV;
-
-	if (dev->pm_domain)
-		return -EEXIST;
+		return 0;
 
 	/*
 	 * Only attach the power domain to the first device if the
@@ -1268,7 +1265,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
 	 * management twice.
 	 */
 	if (!acpi_device_is_first_physical_node(adev, dev))
-		return -EBUSY;
+		return 0;
 
 	acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
 	dev_pm_domain_set(dev, &acpi_general_pm_domain);
@@ -1278,7 +1275,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
 	}
 
 	dev->pm_domain->detach = acpi_dev_pm_detach;
-	return 0;
+	return 1;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
 #endif /* CONFIG_PM */
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 4a3ac31c07d0..b0160b5c5608 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -248,7 +248,7 @@ static int amba_probe(struct device *dev)
 			break;
 
 		ret = dev_pm_domain_attach(dev, true);
-		if (ret == -EPROBE_DEFER)
+		if (ret)
 			break;
 
 		ret = amba_get_enable_pclk(pcdev);
@@ -375,7 +375,7 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
 	}
 
 	ret = dev_pm_domain_attach(&dev->dev, true);
-	if (ret == -EPROBE_DEFER) {
+	if (ret) {
 		iounmap(tmp);
 		goto err_release;
 	}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6389c88b3500..738fb22978dd 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -334,6 +334,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
 	{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
 	{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
+	{ PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 68596bd4cf06..346b163f6e89 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4493,6 +4493,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
 
+	/* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
+	   SD7SN6S256G and SD8SN8U256G */
+	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
+
 	/* devices which puke on READ_NATIVE_MAX */
 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
@@ -4549,13 +4553,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 						ATA_HORKAGE_ZERO_AFTER_TRIM |
 						ATA_HORKAGE_NOLPM, },
 
-	/* This specific Samsung model/firmware-rev does not handle LPM well */
+	/* These specific Samsung models/firmware-revs do not handle LPM well */
 	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
+	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
 
 	/* Sandisk devices which are known to not handle LPM well */
 	{ "SanDisk SD7UB3Q*G1001",	NULL,	ATA_HORKAGE_NOLPM, },
 
 	/* devices that don't properly handle queued TRIM commands */
+	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/base/base.h b/drivers/base/base.h
index d800de650fa5..a75c3025fb78 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -161,3 +161,6 @@ extern void device_links_driver_cleanup(struct device *dev);
 extern void device_links_no_driver(struct device *dev);
 extern bool device_links_busy(struct device *dev);
 extern void device_links_unbind_consumers(struct device *dev);
+
+/* device pm support */
+void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b610816eb887..ad7b50897bcc 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -145,6 +145,26 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
 }
 
 /**
+ * device_pm_move_to_tail - Move set of devices to the end of device lists
+ * @dev: Device to move
+ *
+ * This is a device_reorder_to_tail() wrapper taking the requisite locks.
+ *
+ * It moves the @dev along with all of its children and all of its consumers
+ * to the ends of the device_kset and dpm_list, recursively.
+ */
+void device_pm_move_to_tail(struct device *dev)
+{
+	int idx;
+
+	idx = device_links_read_lock();
+	device_pm_lock();
+	device_reorder_to_tail(dev, NULL);
+	device_pm_unlock();
+	device_links_read_unlock(idx);
+}
+
+/**
  * device_link_add - Create a link between two devices.
  * @consumer: Consumer end of the link.
  * @supplier: Supplier end of the link.
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2da998baa75c..30cc9c877ebb 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -534,14 +534,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
 	return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+					  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
 	&dev_attr_spectre_v1.attr,
 	&dev_attr_spectre_v2.attr,
+	&dev_attr_spec_store_bypass.attr,
 	NULL
 };
 
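Note: the vulnerability files above rely on the __weak-stub pattern: the generic cpu_show_spec_store_bypass() reports "Not affected", and an architecture that knows its mitigation state supplies a strong definition that overrides it. A hedged sketch of such an arch-side override (the mitigation string is illustrative only, not from this diff):

	/* Sketch: strong arch definition overriding the __weak default above. */
	ssize_t cpu_show_spec_store_bypass(struct device *dev,
					   struct device_attribute *attr, char *buf)
	{
		/* A real architecture reports its actual mitigation state here. */
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled\n");
	}
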
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index c9f54089429b..a41c91bfac0e 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -122,9 +122,7 @@ static void deferred_probe_work_func(struct work_struct *work)
 		 * the list is a good order for suspend but deferred
 		 * probe makes that very unsafe.
 		 */
-		device_pm_lock();
-		device_pm_move_last(dev);
-		device_pm_unlock();
+		device_pm_move_to_tail(dev);
 
 		dev_dbg(dev, "Retrying from deferred list\n");
 		if (initcall_debug && !initcalls_done)
@@ -582,7 +580,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
 	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
 		 drv->bus->name, __func__, dev_name(dev), drv->name);
 
-	pm_runtime_get_suppliers(dev);
+	pm_runtime_resume_suppliers(dev);
 	if (dev->parent)
 		pm_runtime_get_sync(dev->parent);
 
@@ -593,7 +591,6 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
 	if (dev->parent)
 		pm_runtime_put(dev->parent);
 
-	pm_runtime_put_suppliers(dev);
 	return ret;
 }
 
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 7a3a580821e0..a5e821d09656 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -490,7 +490,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
 	return 0;
 }
 
-int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages,
+		      bool check_nid)
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
 	unsigned long pfn;
@@ -514,7 +515,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
 
 		mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
 
-		ret = register_mem_sect_under_node(mem_blk, nid, true);
+		ret = register_mem_sect_under_node(mem_blk, nid, check_nid);
 		if (!err)
 			err = ret;
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8075ddc70a17..9460139d9b02 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -572,17 +572,16 @@ static int platform_drv_probe(struct device *_dev)
 		return ret;
 
 	ret = dev_pm_domain_attach(_dev, true);
-	if (ret != -EPROBE_DEFER) {
-		if (drv->probe) {
-			ret = drv->probe(dev);
-			if (ret)
-				dev_pm_domain_detach(_dev, true);
-		} else {
-			/* don't fail if just dev_pm_domain_attach failed */
-			ret = 0;
-		}
+	if (ret)
+		goto out;
+
+	if (drv->probe) {
+		ret = drv->probe(dev);
+		if (ret)
+			dev_pm_domain_detach(_dev, true);
 	}
 
+out:
 	if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
 		dev_warn(_dev, "probe deferral not supported\n");
 		ret = -ENXIO;
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index f6a9ad52cbbf..7ae62b6355b8 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -98,17 +98,21 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
 * Callers must ensure proper synchronization of this function with power
 * management callbacks.
 *
- * Returns 0 on successfully attached PM domain or negative error code.
+ * Returns 0 on successfully attached PM domain, or when it is found that the
+ * device doesn't need a PM domain, else a negative error code.
 */
int dev_pm_domain_attach(struct device *dev, bool power_on)
{
	int ret;

+	if (dev->pm_domain)
+		return 0;
+
	ret = acpi_dev_pm_attach(dev, power_on);
-	if (ret)
+	if (!ret)
		ret = genpd_dev_pm_attach(dev);

-	return ret;
+	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach);

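Note: combined with the acpi_dev_pm_attach() and genpd_dev_pm_attach() changes elsewhere in this series, dev_pm_domain_attach() now returns 0 both when a PM domain was attached and when the device needs none, and a negative error code (including -EPROBE_DEFER) otherwise. A minimal sketch of the calling convention bus code follows after this change (bus_probe() and do_driver_probe() are hypothetical names):

	/* Sketch only: bus_probe() and do_driver_probe() are hypothetical. */
	static int bus_probe(struct device *dev)
	{
		int ret;

		ret = dev_pm_domain_attach(dev, true);
		if (ret)	/* any error now aborts, not just -EPROBE_DEFER */
			return ret;

		ret = do_driver_probe(dev);
		if (ret)
			dev_pm_domain_detach(dev, true);

		return ret;
	}

This is exactly the shape the amba and platform bus changes above adopt.
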
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1ea0e2502e8e..6f403d6fccb2 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/pm_opp.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_qos.h>
@@ -1315,7 +1316,6 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
 #endif /* CONFIG_PM_SLEEP */
 
 static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
-					struct generic_pm_domain *genpd,
 					struct gpd_timing_data *td)
 {
 	struct generic_pm_domain_data *gpd_data;
@@ -1377,24 +1377,19 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 			    struct gpd_timing_data *td)
 {
 	struct generic_pm_domain_data *gpd_data;
-	int ret = 0;
+	int ret;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
 		return -EINVAL;
 
-	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
+	gpd_data = genpd_alloc_dev_data(dev, td);
 	if (IS_ERR(gpd_data))
 		return PTR_ERR(gpd_data);
 
 	genpd_lock(genpd);
 
-	if (genpd->prepared_count > 0) {
-		ret = -EAGAIN;
-		goto out;
-	}
-
 	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
 	if (ret)
 		goto out;
@@ -1418,23 +1413,21 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 }
 
 /**
- * __pm_genpd_add_device - Add a device to an I/O PM domain.
+ * pm_genpd_add_device - Add a device to an I/O PM domain.
  * @genpd: PM domain to add the device to.
  * @dev: Device to be added.
- * @td: Set of PM QoS timing parameters to attach to the device.
 */
-int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
-			  struct gpd_timing_data *td)
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
 {
 	int ret;
 
 	mutex_lock(&gpd_list_lock);
-	ret = genpd_add_device(genpd, dev, td);
+	ret = genpd_add_device(genpd, dev, NULL);
 	mutex_unlock(&gpd_list_lock);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
+EXPORT_SYMBOL_GPL(pm_genpd_add_device);
 
 static int genpd_remove_device(struct generic_pm_domain *genpd,
 			       struct device *dev)
@@ -1481,13 +1474,13 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
 
 /**
  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
- * @genpd: PM domain to remove the device from.
  * @dev: Device to be removed.
 */
-int pm_genpd_remove_device(struct generic_pm_domain *genpd,
-			   struct device *dev)
+int pm_genpd_remove_device(struct device *dev)
 {
-	if (!genpd || genpd != genpd_lookup_dev(dev))
+	struct generic_pm_domain *genpd = genpd_lookup_dev(dev);
+
+	if (!genpd)
 		return -EINVAL;
 
 	return genpd_remove_device(genpd, dev);
@@ -1696,6 +1689,9 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 		return ret;
 	}
 
+	device_initialize(&genpd->dev);
+	dev_set_name(&genpd->dev, "%s", genpd->name);
+
 	mutex_lock(&gpd_list_lock);
 	list_add(&genpd->gpd_list_node, &gpd_list);
 	mutex_unlock(&gpd_list_lock);
@@ -1892,14 +1888,33 @@ int of_genpd_add_provider_simple(struct device_node *np,
 
 	mutex_lock(&gpd_list_lock);
 
-	if (genpd_present(genpd)) {
-		ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
-		if (!ret) {
-			genpd->provider = &np->fwnode;
-			genpd->has_provider = true;
+	if (!genpd_present(genpd))
+		goto unlock;
+
+	genpd->dev.of_node = np;
+
+	/* Parse genpd OPP table */
+	if (genpd->set_performance_state) {
+		ret = dev_pm_opp_of_add_table(&genpd->dev);
+		if (ret) {
+			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
+				ret);
+			goto unlock;
 		}
 	}
 
+	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
+	if (ret) {
+		if (genpd->set_performance_state)
+			dev_pm_opp_of_remove_table(&genpd->dev);
+
+		goto unlock;
+	}
+
+	genpd->provider = &np->fwnode;
+	genpd->has_provider = true;
+
+unlock:
 	mutex_unlock(&gpd_list_lock);
 
 	return ret;
@@ -1914,6 +1929,7 @@ EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
 int of_genpd_add_provider_onecell(struct device_node *np,
 				  struct genpd_onecell_data *data)
 {
+	struct generic_pm_domain *genpd;
 	unsigned int i;
 	int ret = -EINVAL;
 
@@ -1926,13 +1942,27 @@ int of_genpd_add_provider_onecell(struct device_node *np,
 	data->xlate = genpd_xlate_onecell;
 
 	for (i = 0; i < data->num_domains; i++) {
-		if (!data->domains[i])
+		genpd = data->domains[i];
+
+		if (!genpd)
 			continue;
-		if (!genpd_present(data->domains[i]))
+		if (!genpd_present(genpd))
 			goto error;
 
-		data->domains[i]->provider = &np->fwnode;
-		data->domains[i]->has_provider = true;
+		genpd->dev.of_node = np;
+
+		/* Parse genpd OPP table */
+		if (genpd->set_performance_state) {
+			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
+			if (ret) {
+				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
+					i, ret);
+				goto error;
+			}
+		}
+
+		genpd->provider = &np->fwnode;
+		genpd->has_provider = true;
 	}
 
 	ret = genpd_add_provider(np, data->xlate, data);
@@ -1945,10 +1975,16 @@ int of_genpd_add_provider_onecell(struct device_node *np,
 
 error:
 	while (i--) {
-		if (!data->domains[i])
+		genpd = data->domains[i];
+
+		if (!genpd)
 			continue;
-		data->domains[i]->provider = NULL;
-		data->domains[i]->has_provider = false;
+
+		genpd->provider = NULL;
+		genpd->has_provider = false;
+
+		if (genpd->set_performance_state)
+			dev_pm_opp_of_remove_table(&genpd->dev);
 	}
 
 	mutex_unlock(&gpd_list_lock);
@@ -1975,10 +2011,17 @@ void of_genpd_del_provider(struct device_node *np)
 			 * provider, set the 'has_provider' to false
 			 * so that the PM domain can be safely removed.
 			 */
-			list_for_each_entry(gpd, &gpd_list, gpd_list_node)
-				if (gpd->provider == &np->fwnode)
+			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+				if (gpd->provider == &np->fwnode) {
 					gpd->has_provider = false;
 
+					if (!gpd->set_performance_state)
+						continue;
+
+					dev_pm_opp_of_remove_table(&gpd->dev);
+				}
+			}
+
 			list_del(&cp->link);
 			of_node_put(cp->node);
 			kfree(cp);
@@ -2185,31 +2228,25 @@ static void genpd_dev_pm_sync(struct device *dev)
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
- * Both generic and legacy Samsung-specific DT bindings are supported to keep
- * backwards compatibility with existing DTBs.
- *
- * Returns 0 on successfully attached PM domain or negative error code. Note
- * that if a power-domain exists for the device, but it cannot be found or
- * turned on, then return -EPROBE_DEFER to ensure that the device is not
- * probed and to re-try again later.
+ * Returns 1 on successfully attached PM domain, 0 when the device don't need a
+ * PM domain or a negative error code in case of failures. Note that if a
+ * power-domain exists for the device, but it cannot be found or turned on,
+ * then return -EPROBE_DEFER to ensure that the device is not probed and to
+ * re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
-	unsigned int i;
	int ret;

	if (!dev->of_node)
-		return -ENODEV;
-
-	if (dev->pm_domain)
-		return -EEXIST;
+		return 0;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					 "#power-domain-cells", 0, &pd_args);
	if (ret < 0)
-		return ret;
+		return 0;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
@@ -2223,21 +2260,14 @@ int genpd_dev_pm_attach(struct device *dev)
 
 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
 
-	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
-		ret = genpd_add_device(pd, dev, NULL);
-		if (ret != -EAGAIN)
-			break;
-
-		mdelay(i);
-		cond_resched();
-	}
+	ret = genpd_add_device(pd, dev, NULL);
 	mutex_unlock(&gpd_list_lock);
 
 	if (ret < 0) {
 		if (ret != -EPROBE_DEFER)
 			dev_err(dev, "failed to add to PM domain %s: %d",
 				pd->name, ret);
-		goto out;
+		return ret;
 	}
 
 	dev->pm_domain->detach = genpd_dev_pm_detach;
@@ -2246,8 +2276,11 @@ int genpd_dev_pm_attach(struct device *dev)
 	genpd_lock(pd);
 	ret = genpd_power_on(pd, 0);
 	genpd_unlock(pd);
-out:
-	return ret ? -EPROBE_DEFER : 0;
+
+	if (ret)
+		genpd_remove_device(pd, dev);
+
+	return ret ? -EPROBE_DEFER : 1;
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
 
@@ -2361,6 +2394,55 @@ int of_genpd_parse_idle_states(struct device_node *dn,
 }
 EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
 
+/**
+ * of_genpd_opp_to_performance_state- Gets performance state of device's
+ * power domain corresponding to a DT node's "required-opps" property.
+ *
+ * @dev: Device for which the performance-state needs to be found.
+ * @opp_node: DT node where the "required-opps" property is present. This can be
+ *	the device node itself (if it doesn't have an OPP table) or a node
+ *	within the OPP table of a device (if device has an OPP table).
+ * @state: Pointer to return performance state.
+ *
+ * Returns performance state corresponding to the "required-opps" property of
+ * a DT node. This calls platform specific genpd->opp_to_performance_state()
+ * callback to translate power domain OPP to performance state.
+ *
+ * Returns performance state on success and 0 on failure.
+ */
+unsigned int of_genpd_opp_to_performance_state(struct device *dev,
+					       struct device_node *opp_node)
+{
+	struct generic_pm_domain *genpd;
+	struct dev_pm_opp *opp;
+	int state = 0;
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return 0;
+
+	if (unlikely(!genpd->set_performance_state))
+		return 0;
+
+	genpd_lock(genpd);
+
+	opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
+	if (IS_ERR(opp)) {
+		dev_err(dev, "Failed to find required OPP: %ld\n",
+			PTR_ERR(opp));
+		goto unlock;
+	}
+
+	state = genpd->opp_to_performance_state(genpd, opp);
+	dev_pm_opp_put(opp);
+
+unlock:
+	genpd_unlock(genpd);
+
+	return state;
+}
+EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);
+
 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
 
 
@@ -2628,6 +2710,19 @@ static int genpd_devices_show(struct seq_file *s, void *data)
 	return ret;
 }
 
+static int genpd_perf_state_show(struct seq_file *s, void *data)
+{
+	struct generic_pm_domain *genpd = s->private;
+
+	if (genpd_lock_interruptible(genpd))
+		return -ERESTARTSYS;
+
+	seq_printf(s, "%u\n", genpd->performance_state);
+
+	genpd_unlock(genpd);
+	return 0;
+}
+
 #define define_genpd_open_function(name) \
 static int genpd_##name##_open(struct inode *inode, struct file *file) \
 { \
@@ -2641,6 +2736,7 @@ define_genpd_open_function(idle_states);
 define_genpd_open_function(active_time);
 define_genpd_open_function(total_idle_time);
 define_genpd_open_function(devices);
+define_genpd_open_function(perf_state);
 
 #define define_genpd_debugfs_fops(name) \
 static const struct file_operations genpd_##name##_fops = { \
@@ -2657,6 +2753,7 @@ define_genpd_debugfs_fops(idle_states);
 define_genpd_debugfs_fops(active_time);
 define_genpd_debugfs_fops(total_idle_time);
 define_genpd_debugfs_fops(devices);
+define_genpd_debugfs_fops(perf_state);
 
 static int __init genpd_debug_init(void)
 {
@@ -2690,6 +2787,9 @@ static int __init genpd_debug_init(void)
 				   d, genpd, &genpd_total_idle_time_fops);
 		debugfs_create_file("devices", 0444,
 				    d, genpd, &genpd_devices_fops);
+		if (genpd->set_performance_state)
+			debugfs_create_file("perf_state", 0444,
+					    d, genpd, &genpd_perf_state_fops);
 	}
 
 	return 0;
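Note: the net effect of the genpd rework above on callers is a simpler public API: __pm_genpd_add_device() loses its gpd_timing_data parameter and becomes pm_genpd_add_device(), while pm_genpd_remove_device() now derives the domain from the device itself. A hedged usage sketch (my_genpd, my_dev and attach_to_domain() are placeholders, not from this diff):

	/* Sketch only: my_genpd, my_dev and attach_to_domain() are placeholders. */
	static int attach_to_domain(struct generic_pm_domain *my_genpd,
				    struct device *my_dev)
	{
		int ret;

		ret = pm_genpd_add_device(my_genpd, my_dev);	/* td argument is gone */
		if (ret)
			return ret;

		/* ... device now operates under its PM domain ... */

		return pm_genpd_remove_device(my_dev);	/* domain looked up via dev */
	}
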
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 02a497e7c785..3f68e2919dc5 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -192,34 +192,31 @@ void device_pm_move_last(struct device *dev)
 	list_move_tail(&dev->power.entry, &dpm_list);
 }
 
-static ktime_t initcall_debug_start(struct device *dev)
+static ktime_t initcall_debug_start(struct device *dev, void *cb)
 {
-	ktime_t calltime = 0;
-
-	if (pm_print_times_enabled) {
-		pr_info("calling %s+ @ %i, parent: %s\n",
-			dev_name(dev), task_pid_nr(current),
-			dev->parent ? dev_name(dev->parent) : "none");
-		calltime = ktime_get();
-	}
+	if (!pm_print_times_enabled)
+		return 0;
 
-	return calltime;
+	dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
+		 task_pid_nr(current),
+		 dev->parent ? dev_name(dev->parent) : "none");
+	return ktime_get();
 }
 
 static void initcall_debug_report(struct device *dev, ktime_t calltime,
-				  int error, pm_message_t state,
-				  const char *info)
+				  void *cb, int error)
 {
 	ktime_t rettime;
 	s64 nsecs;
 
+	if (!pm_print_times_enabled)
+		return;
+
 	rettime = ktime_get();
 	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
 
-	if (pm_print_times_enabled) {
-		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
-			error, (unsigned long long)nsecs >> 10);
-	}
+	dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
+		 (unsigned long long)nsecs >> 10);
 }
 
 /**
@@ -446,7 +443,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 	if (!cb)
 		return 0;
 
-	calltime = initcall_debug_start(dev);
+	calltime = initcall_debug_start(dev, cb);
 
 	pm_dev_dbg(dev, state, info);
 	trace_device_pm_callback_start(dev, info, state.event);
@@ -454,7 +451,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 	trace_device_pm_callback_end(dev, error);
 	suspend_report_result(cb, error);
 
-	initcall_debug_report(dev, calltime, error, state, info);
+	initcall_debug_report(dev, calltime, cb, error);
 
 	return error;
 }
@@ -1664,14 +1661,14 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
 	int error;
 	ktime_t calltime;
 
-	calltime = initcall_debug_start(dev);
+	calltime = initcall_debug_start(dev, cb);
 
 	trace_device_pm_callback_start(dev, info, state.event);
 	error = cb(dev, state);
 	trace_device_pm_callback_end(dev, error);
 	suspend_report_result(cb, error);
 
-	initcall_debug_report(dev, calltime, error, state, info);
+	initcall_debug_report(dev, calltime, cb, error);
 
 	return error;
 }
@@ -1923,10 +1920,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
 
 	dev->power.wakeup_path = false;
 
-	if (dev->power.no_pm_callbacks) {
-		ret = 1;	/* Let device go direct_complete */
+	if (dev->power.no_pm_callbacks)
 		goto unlock;
-	}
 
 	if (dev->pm_domain)
 		callback = dev->pm_domain->ops.prepare;
@@ -1960,7 +1955,8 @@ unlock:
 	 */
 	spin_lock_irq(&dev->power.lock);
 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
-		pm_runtime_suspended(dev) && ret > 0 &&
+		((pm_runtime_suspended(dev) && ret > 0) ||
+		 dev->power.no_pm_callbacks) &&
 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
 	spin_unlock_irq(&dev->power.lock);
 	return 0;
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 86e67e70b509..c511def48b48 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -56,14 +56,6 @@ static inline void device_wakeup_detach_irq(struct device *dev)
 {
 }
 
-static inline void device_wakeup_arm_wake_irqs(void)
-{
-}
-
-static inline void device_wakeup_disarm_wake_irqs(void)
-{
-}
-
 #endif /* CONFIG_PM_SLEEP */
 
 /*
@@ -95,28 +87,6 @@ static inline void pm_runtime_remove(struct device *dev) {}
 
 static inline int dpm_sysfs_add(struct device *dev) { return 0; }
 static inline void dpm_sysfs_remove(struct device *dev) {}
-static inline void rpm_sysfs_remove(struct device *dev) {}
-static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
-static inline void wakeup_sysfs_remove(struct device *dev) {}
-static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
-static inline void pm_qos_sysfs_remove(struct device *dev) {}
-
-static inline void dev_pm_arm_wake_irq(struct wake_irq *wirq)
-{
-}
-
-static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
-{
-}
-
-static inline void dev_pm_enable_wake_irq_check(struct device *dev,
-						bool can_change_status)
-{
-}
-
-static inline void dev_pm_disable_wake_irq_check(struct device *dev)
-{
-}
 
 #endif
 
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8bef3cb2424d..c6030f100c08 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1563,37 +1563,16 @@ void pm_runtime_clean_up_links(struct device *dev)
 }
 
 /**
- * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * pm_runtime_resume_suppliers - Resume supplier devices.
  * @dev: Consumer device.
 */
-void pm_runtime_get_suppliers(struct device *dev)
+void pm_runtime_resume_suppliers(struct device *dev)
 {
-	struct device_link *link;
-	int idx;
-
-	idx = device_links_read_lock();
-
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
-		if (link->flags & DL_FLAG_PM_RUNTIME)
-			pm_runtime_get_sync(link->supplier);
-
-	device_links_read_unlock(idx);
-}
-
-/**
- * pm_runtime_put_suppliers - Drop references to supplier devices.
- * @dev: Consumer device.
- */
-void pm_runtime_put_suppliers(struct device *dev)
-{
-	struct device_link *link;
 	int idx;
 
 	idx = device_links_read_lock();
 
-	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
-		if (link->flags & DL_FLAG_PM_RUNTIME)
-			pm_runtime_put(link->supplier);
+	rpm_get_suppliers(dev);
 
 	device_links_read_unlock(idx);
 }
@@ -1607,6 +1586,8 @@ void pm_runtime_new_link(struct device *dev)
 
 void pm_runtime_drop_link(struct device *dev)
 {
+	rpm_put_suppliers(dev);
+
 	spin_lock_irq(&dev->power.lock);
 	WARN_ON(dev->power.links_count == 0);
 	dev->power.links_count--;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index ea01621ed769..e1322788eaee 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -183,7 +183,6 @@ void wakeup_source_add(struct wakeup_source *ws)
 	spin_lock_init(&ws->lock);
 	timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
 	ws->active = false;
-	ws->last_time = ktime_get();
 
 	spin_lock_irqsave(&events_lock, flags);
 	list_add_rcu(&ws->entry, &wakeup_sources);
@@ -854,7 +853,7 @@ bool pm_wakeup_pending(void)
 	spin_unlock_irqrestore(&events_lock, flags);
 
 	if (ret) {
-		pr_info("PM: Wakeup pending, aborting suspend\n");
+		pr_debug("PM: Wakeup pending, aborting suspend\n");
 		pm_print_active_wakeup_sources();
 	}
 
@@ -1029,32 +1028,75 @@ static int print_wakeup_source_stats(struct seq_file *m,
 	return 0;
 }
 
-/**
- * wakeup_sources_stats_show - Print wakeup sources statistics information.
- * @m: seq_file to print the statistics into.
- */
-static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
+static void *wakeup_sources_stats_seq_start(struct seq_file *m,
+					    loff_t *pos)
 {
 	struct wakeup_source *ws;
-	int srcuidx;
+	loff_t n = *pos;
+	int *srcuidx = m->private;
 
-	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
-		"expire_count\tactive_since\ttotal_time\tmax_time\t"
-		"last_change\tprevent_suspend_time\n");
+	if (n == 0) {
+		seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+			"expire_count\tactive_since\ttotal_time\tmax_time\t"
+			"last_change\tprevent_suspend_time\n");
+	}
 
-	srcuidx = srcu_read_lock(&wakeup_srcu);
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
-		print_wakeup_source_stats(m, ws);
-	srcu_read_unlock(&wakeup_srcu, srcuidx);
+	*srcuidx = srcu_read_lock(&wakeup_srcu);
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		if (n-- <= 0)
+			return ws;
+	}
+
+	return NULL;
+}
+
+static void *wakeup_sources_stats_seq_next(struct seq_file *m,
+					   void *v, loff_t *pos)
+{
+	struct wakeup_source *ws = v;
+	struct wakeup_source *next_ws = NULL;
 
-	print_wakeup_source_stats(m, &deleted_ws);
+	++(*pos);
+
+	list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
+		next_ws = ws;
+		break;
+	}
+
+	return next_ws;
+}
+
+static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
+{
+	int *srcuidx = m->private;
+
+	srcu_read_unlock(&wakeup_srcu, *srcuidx);
+}
+
+/**
+ * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
+ * @m: seq_file to print the statistics into.
+ * @v: wakeup_source of each iteration
+ */
+static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
+{
+	struct wakeup_source *ws = v;
+
+	print_wakeup_source_stats(m, ws);
 
 	return 0;
 }
 
+static const struct seq_operations wakeup_sources_stats_seq_ops = {
+	.start = wakeup_sources_stats_seq_start,
+	.next  = wakeup_sources_stats_seq_next,
+	.stop  = wakeup_sources_stats_seq_stop,
+	.show  = wakeup_sources_stats_seq_show,
+};
+
 static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, wakeup_sources_stats_show, NULL);
+	return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
 }
 
 static const struct file_operations wakeup_sources_stats_fops = {
@@ -1062,7 +1104,7 @@ static const struct file_operations wakeup_sources_stats_fops = {
 	.open = wakeup_sources_stats_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = single_release,
+	.release = seq_release_private,
 };
 
 static int __init wakeup_sources_debugfs_init(void)
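Note: the conversion above is the standard seq_file iterator pattern: .start takes the SRCU read lock and positions the cursor at *pos, .next advances it, .show prints one wakeup source, and .stop drops the lock, so any number of sources can be streamed without one large buffer. A generic hedged sketch of the same pattern over a mutex-protected list (every ex_* identifier is hypothetical; needs <linux/seq_file.h> and <linux/list.h>):

	/* Sketch only: every ex_* identifier is hypothetical. */
	static LIST_HEAD(ex_list);
	static DEFINE_MUTEX(ex_lock);

	struct ex_item {
		struct list_head node;
		const char *name;
	};

	static void *ex_seq_start(struct seq_file *m, loff_t *pos)
	{
		mutex_lock(&ex_lock);		/* held from .start to .stop */
		return seq_list_start(&ex_list, *pos);
	}

	static void *ex_seq_next(struct seq_file *m, void *v, loff_t *pos)
	{
		return seq_list_next(v, &ex_list, pos);
	}

	static void ex_seq_stop(struct seq_file *m, void *v)
	{
		mutex_unlock(&ex_lock);
	}

	static int ex_seq_show(struct seq_file *m, void *v)
	{
		struct ex_item *it = list_entry(v, struct ex_item, node);

		seq_printf(m, "%s\n", it->name);
		return 0;
	}

	static const struct seq_operations ex_seq_ops = {
		.start = ex_seq_start,
		.next  = ex_seq_next,
		.stop  = ex_seq_stop,
		.show  = ex_seq_show,
	};
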
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index f040aba48d50..27e9686b6d3a 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
 {
 	int i;
 	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
-	char interrupts[20];
+	char interrupts[25];
 	char *ints = interrupts;
 
 	for (i = 0; i < ARRAY_SIZE(irq_name); i++)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 5d4e31655d96..55cf554bc914 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1068,6 +1068,7 @@ static int loop_clr_fd(struct loop_device *lo)
 	if (bdev) {
 		bdput(bdev);
 		invalidate_bdev(bdev);
+		bdev->bd_inode->i_mapping->wb_err = 0;
 	}
 	set_capacity(lo->lo_disk, 0);
 	loop_sysfs_exit(lo);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 41492e980ef4..34968a381d0f 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -266,15 +266,13 @@ config COMMON_CLK_STM32MP157
          Support for stm32mp157 SoC family clocks
 
 config COMMON_CLK_STM32F
-       bool "Clock driver for stm32f4 and stm32f7 SoC families"
-       depends on MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746
+       def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746)
        help
        ---help---
          Support for stm32f4 and stm32f7 SoC families clocks
 
 config COMMON_CLK_STM32H7
-       bool "Clock driver for stm32h7 SoC family"
-       depends on MACH_STM32H743
+       def_bool COMMON_CLK && MACH_STM32H743
        help
        ---help---
          Support for stm32h7 SoC family clocks
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 114ecbb94ec5..12320118f8de 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -464,7 +464,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
        clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000);
 
        /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
-       clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+       clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]);
        clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]);
        clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]);
        clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index d4a81be0d7d2..b6be62025325 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -152,8 +152,8 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
                EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
        writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
 
-       memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
+       memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
               EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
 
        eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
                              EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
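
The memset() to memset_io() switch matters because EIP197_PE() yields an __iomem pointer: plain memset() issues ordinary memory stores that are not guaranteed to work on a device window (and sparse flags the mixed address spaces). A minimal sketch of the rule, with a hypothetical helper:

#include <linux/io.h>

/* Hypothetical helper: clear a scratch window inside a device's MMIO
 * region. 'base' must come from ioremap(); memset_io() goes through the
 * architecture's MMIO accessors, where plain memset() may not. */
static void clear_scratch(void __iomem *base, unsigned long off, size_t len)
{
        memset_io(base + off, 0, len);
}
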
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d29275b97e84..4a828c18099a 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -524,6 +524,14 @@ static int bam_alloc_chan(struct dma_chan *chan)
        return 0;
 }
 
+static int bam_pm_runtime_get_sync(struct device *dev)
+{
+       if (pm_runtime_enabled(dev))
+               return pm_runtime_get_sync(dev);
+
+       return 0;
+}
+
 /**
  * bam_free_chan - Frees dma resources associated with specific channel
  * @chan: specified channel
@@ -539,7 +547,7 @@ static void bam_free_chan(struct dma_chan *chan)
        unsigned long flags;
        int ret;
 
-       ret = pm_runtime_get_sync(bdev->dev);
+       ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return;
 
@@ -720,7 +728,7 @@ static int bam_pause(struct dma_chan *chan)
        unsigned long flag;
        int ret;
 
-       ret = pm_runtime_get_sync(bdev->dev);
+       ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;
 
@@ -746,7 +754,7 @@ static int bam_resume(struct dma_chan *chan)
        unsigned long flag;
        int ret;
 
-       ret = pm_runtime_get_sync(bdev->dev);
+       ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;
 
@@ -852,7 +860,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
        if (srcs & P_IRQ)
                tasklet_schedule(&bdev->task);
 
-       ret = pm_runtime_get_sync(bdev->dev);
+       ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;
 
@@ -969,7 +977,7 @@ static void bam_start_dma(struct bam_chan *bchan)
        if (!vd)
                return;
 
-       ret = pm_runtime_get_sync(bdev->dev);
+       ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return;
 
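
The wrapper above turns every runtime-PM get into a no-op when runtime PM is disabled for the device, so the converted call sites keep working on controllers whose power is managed remotely. A sketch of how a hardware-touching path would pair it with the usual put; bam_do_something() is hypothetical, and the mark_last_busy/put_autosuspend pairing is an assumption modeled on what such drivers commonly do, not a quote of this file:

/* Illustrative only: guarded get on entry, autosuspend-style put on exit. */
static int bam_do_something(struct bam_device *bdev)
{
        int ret;

        ret = bam_pm_runtime_get_sync(bdev->dev);
        if (ret < 0)
                return ret;

        /* ... touch hardware registers here ... */

        pm_runtime_mark_last_busy(bdev->dev);
        pm_runtime_put_autosuspend(bdev->dev);
        return 0;
}
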
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 14b147135a0c..2455be8cbc4f 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -778,6 +778,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
        if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
                dev_err(&sdev->dev, "failed to setup transport\n");
                scmi_device_destroy(sdev);
+               return;
        }
 
        /* setup handle now as the transport is ready */
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index b9bd827caa22..1b4d465cc5d9 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -98,6 +98,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
                        (phys_seed >> 32) & mask : TEXT_OFFSET;
 
        /*
+        * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
+        * be a multiple of EFI_KIMG_ALIGN, and we must ensure that
+        * we preserve the misalignment of 'offset' relative to
+        * EFI_KIMG_ALIGN so that statically allocated objects whose
+        * alignment exceeds PAGE_SIZE appear correctly aligned in
+        * memory.
+        */
+       offset |= TEXT_OFFSET % EFI_KIMG_ALIGN;
+
+       /*
         * If KASLR is enabled, and we have some randomness available,
         * locate the kernel at a randomized offset in physical memory.
         */
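
The new `offset |= TEXT_OFFSET % EFI_KIMG_ALIGN` works because EFI_KIMG_ALIGN is a power of two and offset is a multiple of it, so the low bits are clear and the OR is equivalent to adding the residue. A stand-alone check with made-up values; the constants here are assumptions for illustration, not the stub's real ones:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint64_t align = 0x10000;      /* assumed EFI_KIMG_ALIGN */
        const uint64_t text_offset = 0x8123; /* assumed randomized TEXT_OFFSET */
        uint64_t offset = 7 * align;         /* any multiple of the alignment */

        /* Low bits of 'offset' are zero, so OR-ing in the residue is the
         * same as adding it: the misalignment is preserved exactly. */
        offset |= text_offset % align;
        assert(offset % align == text_offset % align);
        return 0;
}
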
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index dfbd894d5bb7..4e24e591ae74 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -147,7 +147,7 @@ static u32 smc(u32 cmd_addr)
147 "smc #0 @ switch to secure world\n" 147 "smc #0 @ switch to secure world\n"
148 : "=r" (r0) 148 : "=r" (r0)
149 : "r" (r0), "r" (r1), "r" (r2) 149 : "r" (r0), "r" (r1), "r" (r2)
150 : "r3"); 150 : "r3", "r12");
151 } while (r0 == QCOM_SCM_INTERRUPTED); 151 } while (r0 == QCOM_SCM_INTERRUPTED);
152 152
153 return r0; 153 return r0;
@@ -263,7 +263,7 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
263 "smc #0 @ switch to secure world\n" 263 "smc #0 @ switch to secure world\n"
264 : "=r" (r0) 264 : "=r" (r0)
265 : "r" (r0), "r" (r1), "r" (r2) 265 : "r" (r0), "r" (r1), "r" (r2)
266 : "r3"); 266 : "r3", "r12");
267 return r0; 267 return r0;
268} 268}
269 269
@@ -298,7 +298,7 @@ static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
298 "smc #0 @ switch to secure world\n" 298 "smc #0 @ switch to secure world\n"
299 : "=r" (r0) 299 : "=r" (r0)
300 : "r" (r0), "r" (r1), "r" (r2), "r" (r3) 300 : "r" (r0), "r" (r1), "r" (r2), "r" (r3)
301 ); 301 : "r12");
302 return r0; 302 return r0;
303} 303}
304 304
@@ -328,7 +328,7 @@ u32 qcom_scm_get_version(void)
328 "smc #0 @ switch to secure world\n" 328 "smc #0 @ switch to secure world\n"
329 : "=r" (r0), "=r" (r1) 329 : "=r" (r0), "=r" (r1)
330 : "r" (r0), "r" (r1) 330 : "r" (r0), "r" (r1)
331 : "r2", "r3"); 331 : "r2", "r3", "r12");
332 } while (r0 == QCOM_SCM_INTERRUPTED); 332 } while (r0 == QCOM_SCM_INTERRUPTED);
333 333
334 version = r1; 334 version = r1;
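
All four hunks add "r12" to the clobber lists: the secure world may corrupt r12 (the ARM IP scratch register), and any register an asm statement can corrupt must be declared, or the compiler is free to keep a live value there across the call. A compressed ARM-only sketch of the idiom; smc_sketch() is hypothetical and deliberately simpler than the driver's real calling convention:

/* ARM-only sketch of the rule being applied above. */
static inline unsigned int smc_sketch(unsigned int arg)
{
        register unsigned int r0 asm("r0") = arg;

        asm volatile ("smc #0"
                      : "+r" (r0)       /* r0 carries the call and result */
                      :                 /* no other inputs in this sketch */
                      : "r1", "r2", "r3", "r12", "memory"); /* monitor may trash these */
        return r0;
}
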
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index a29362f9ef41..12558044acd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -513,7 +513,7 @@ static int acp_hw_fini(void *handle)
        if (adev->acp.acp_genpd) {
                for (i = 0; i < ACP_DEVS ; i++) {
                        dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
-                       ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
+                       ret = pm_genpd_remove_device(dev);
                        /* If removal fails, dont giveup and try rest */
                        if (ret)
                                dev_err(dev, "remove dev from genpd failed\n");
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a1b9338736e3..c2c21d839727 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -716,7 +716,7 @@ static void remove_compat_control_link(struct drm_device *dev)
        if (!minor)
                return;
 
-       name = kasprintf(GFP_KERNEL, "controlD%d", minor->index);
+       name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
        if (!name)
                return;
 
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 39ac15ce4702..9e2ae02f31e0 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
                return -EINVAL;
 
        /* overflow checks for 32bit size calculations */
-       /* NOTE: DIV_ROUND_UP() can overflow */
+       if (args->bpp > U32_MAX - 8)
+               return -EINVAL;
        cpp = DIV_ROUND_UP(args->bpp, 8);
-       if (!cpp || cpp > 0xffffffffU / args->width)
+       if (cpp > U32_MAX / args->width)
                return -EINVAL;
        stride = cpp * args->width;
-       if (args->height > 0xffffffffU / stride)
+       if (args->height > U32_MAX / stride)
                return -EINVAL;
 
        /* test for wrap-around */
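
The rewritten checks follow one discipline: bound each operand against U32_MAX before the 32-bit round-up or multiply that could wrap, instead of inspecting the result afterwards. A self-contained user-space sketch of the same guards; size_from_bpp() is illustrative, not the DRM helper (which rejects zero width/height/bpp earlier, mirrored here to keep the divisions safe):

#include <stdint.h>
#include <errno.h>

#define U32_MAX 0xffffffffU

static int size_from_bpp(uint32_t bpp, uint32_t width, uint32_t height,
                         uint32_t *out_size)
{
        uint32_t cpp, stride;

        if (!bpp || !width || !height)
                return -EINVAL;
        if (bpp > U32_MAX - 8)          /* DIV_ROUND_UP(bpp, 8) would wrap */
                return -EINVAL;
        cpp = (bpp + 7) / 8;
        if (cpp > U32_MAX / width)      /* cpp * width would wrap */
                return -EINVAL;
        stride = cpp * width;
        if (height > U32_MAX / stride)  /* stride * height would wrap */
                return -EINVAL;
        *out_size = stride * height;
        return 0;
}
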
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index e394799979a6..6d9b9453707c 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
                return -ENOMEM;
 
        filp->private_data = priv;
+       filp->f_mode |= FMODE_UNSIGNED_OFFSET;
        priv->filp = filp;
        priv->pid = get_pid(task_pid(current));
        priv->minor = minor;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d596a8302ca3..854bd51b9478 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -778,6 +778,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
                       I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;
 
+       if (!args->user_size)
+               return -EINVAL;
+
        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e6a8c0ee7df1..8a69a9275e28 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7326,6 +7326,9 @@ enum {
 #define SLICE_ECO_CHICKEN0                     _MMIO(0x7308)
 #define   PIXEL_MASK_CAMMING_DISABLE           (1 << 14)
 
+#define GEN9_WM_CHICKEN3                       _MMIO(0x5588)
+#define   GEN9_FACTOR_IN_CLR_VAL_HIZ           (1 << 9)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         _MMIO(0x9030)
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 4ba139c27fba..f7c25828d3bb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1149,6 +1149,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
        WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
                            GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
 
+       /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
+       if (IS_GEN9_LP(dev_priv))
+               WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+
        /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
        ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
        if (ret)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e3a5f673ff67..8704f7f8d072 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -884,6 +884,7 @@ static void execlists_submission_tasklet(unsigned long data)
 
                head = execlists->csb_head;
                tail = READ_ONCE(buf[write_idx]);
+               rmb(); /* Hopefully paired with a wmb() in HW */
        }
        GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
                  engine->name,
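
The added rmb() keeps the read of the hardware-written tail index ordered before the reads of the ring entries that index covers. A generic kernel-style consumer sketch of the idiom; process_entry() and RING_SIZE are hypothetical, and the producer (hardware, here) is assumed to publish entries before bumping the index with the equivalent of a wmb():

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

#define RING_SIZE 64                    /* assumed ring size */

static void process_entry(u32 entry)    /* stand-in for real handling */
{
        (void)entry;
}

static u32 consume_sketch(const u32 *ring, const u32 *hw_tail, u32 head)
{
        u32 tail = READ_ONCE(*hw_tail); /* index the producer advanced */

        rmb();  /* order the index read before the entry reads it guards */

        while (head != tail) {
                process_entry(READ_ONCE(ring[head]));
                head = (head + 1) % RING_SIZE;
        }
        return head;
}
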
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 3d2d3bbd1342..155ad840f3c5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -88,6 +88,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
        const struct drm_display_mode *panel_mode;
        struct drm_crtc_state *crtc_state;
 
+       if (!state->crtc)
+               return 0;
+
        if (list_empty(&connector->modes)) {
                dev_dbg(lvds->dev, "connector: empty modes list\n");
                return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 94b99c90425a..7c95ed5c5cac 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -130,6 +130,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
        struct vc4_file *vc4file = file->driver_priv;
 
        vc4_perfmon_close_file(vc4file);
+       kfree(vc4file);
 }
 
 static const struct vm_operations_struct vc4_vm_ops = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 70e1a8820a7c..8b770a8e02cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1278,8 +1278,6 @@ static void vmw_master_drop(struct drm_device *dev,
        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
-
-       vmw_fb_refresh(dev_priv);
 }
 
 /**
@@ -1483,7 +1481,6 @@ static int vmw_pm_freeze(struct device *kdev)
                vmw_kms_resume(dev);
                if (dev_priv->enable_fb)
                        vmw_fb_on(dev_priv);
-               vmw_fb_refresh(dev_priv);
                return -EBUSY;
        }
 
@@ -1523,8 +1520,6 @@ static int vmw_pm_restore(struct device *kdev)
        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
 
-       vmw_fb_refresh(dev_priv);
-
        return 0;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index f34f368c1a2e..5fcbe1620d50 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -910,7 +910,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
 int vmw_fb_close(struct vmw_private *dev_priv);
 int vmw_fb_off(struct vmw_private *vmw_priv);
 int vmw_fb_on(struct vmw_private *vmw_priv);
-void vmw_fb_refresh(struct vmw_private *vmw_priv);
 
 /**
  * Kernel modesetting - vmwgfx_kms.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ba0cdb743c3e..54e300365a5c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -866,21 +866,13 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
        spin_lock_irqsave(&par->dirty.lock, flags);
        par->dirty.active = true;
        spin_unlock_irqrestore(&par->dirty.lock, flags);
-
-       return 0;
-}
 
-/**
- * vmw_fb_refresh - Refresh fb display
- *
- * @vmw_priv: Pointer to device private
- *
- * Call into kms to show the fbdev display(s).
- */
-void vmw_fb_refresh(struct vmw_private *vmw_priv)
-{
-       if (!vmw_priv->fb_info)
-               return;
+       /*
+        * Need to reschedule a dirty update, because otherwise that's
+        * only done in dirty_mark() if the previous coalesced
+        * dirty region was empty.
+        */
+       schedule_delayed_work(&par->local_work, 0);
 
-       vmw_fb_set_par(vmw_priv->fb_info);
+       return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index cdff99211602..21d746bdc922 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -329,8 +329,6 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
        struct rpc_channel channel;
        char *msg, *reply = NULL;
        size_t reply_len = 0;
-       int ret = 0;
-
 
        if (!vmw_msg_enabled)
                return -ENODEV;
@@ -344,15 +342,14 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
                return -ENOMEM;
        }
 
-       if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
-           vmw_send_msg(&channel, msg) ||
-           vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
-           vmw_close_channel(&channel)) {
-               DRM_ERROR("Failed to get %s", guest_info_param);
+       if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+               goto out_open;
 
-               ret = -EINVAL;
-       }
+       if (vmw_send_msg(&channel, msg) ||
+           vmw_recv_msg(&channel, (void *) &reply, &reply_len))
+               goto out_msg;
 
+       vmw_close_channel(&channel);
        if (buffer && reply && reply_len > 0) {
                /* Remove reply code, which are the first 2 characters of
                 * the reply
@@ -369,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
        kfree(reply);
        kfree(msg);
 
-       return ret;
+       return 0;
+
+out_msg:
+       vmw_close_channel(&channel);
+       kfree(reply);
+out_open:
+       *length = 0;
+       kfree(msg);
+       DRM_ERROR("Failed to get %s", guest_info_param);
+
+       return -EINVAL;
 }
 
 
@@ -400,15 +407,22 @@ int vmw_host_log(const char *log)
                return -ENOMEM;
        }
 
-       if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
-           vmw_send_msg(&channel, msg) ||
-           vmw_close_channel(&channel)) {
-               DRM_ERROR("Failed to send log\n");
-
-               ret = -EINVAL;
-       }
+       if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
+               goto out_open;
 
+       if (vmw_send_msg(&channel, msg))
+               goto out_msg;
+
+       vmw_close_channel(&channel);
        kfree(msg);
 
-       return ret;
+       return 0;
+
+out_msg:
+       vmw_close_channel(&channel);
+out_open:
+       kfree(msg);
+       DRM_ERROR("Failed to send log\n");
+
+       return -EINVAL;
 }
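
Both functions above now use the kernel's standard goto unwind: one label per acquired resource, released in reverse order, so each failure path undoes exactly what has succeeded so far. Distilled to its shape; rpc_roundtrip_sketch() is hypothetical, while the vmw_* calls are the ones used in the hunks above:

/* Distilled sketch of the unwind pattern (function name hypothetical). */
static int rpc_roundtrip_sketch(const char *request)
{
        struct rpc_channel channel;
        char *msg;

        msg = kasprintf(GFP_KERNEL, "info-get %s", request);
        if (!msg)
                return -ENOMEM;

        if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
                goto out_free;          /* only msg to undo */

        if (vmw_send_msg(&channel, msg))
                goto out_close;         /* channel is open: close it too */

        vmw_close_channel(&channel);
        kfree(msg);
        return 0;

out_close:
        vmw_close_channel(&channel);
out_free:
        kfree(msg);
        return -EINVAL;
}
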
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 557a033fb610..8545488aa0cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -135,17 +135,24 @@
 
 #else
 
-/* In the 32-bit version of this macro, we use "m" because there is no
- * more register left for bp
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we've ran out of registers.
+ * Now we can't reference that memory location while we've modified
+ * %esp or %ebp, so we first push it on the stack, just before we push
+ * %ebp, and then when we need it we read it from the stack where we
+ * just pushed it.
  */
 #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,     \
                        port_num, magic, bp,            \
                        eax, ebx, ecx, edx, si, di)     \
 ({                                                     \
-       asm volatile ("push %%ebp;"                     \
-                     "mov %12, %%ebp;"                 \
+       asm volatile ("push %12;"                       \
+                     "push %%ebp;"                     \
+                     "mov 0x04(%%esp), %%ebp;"         \
                      "rep outsb;"                      \
-                     "pop %%ebp;" :                    \
+                     "pop %%ebp;"                      \
+                     "add $0x04, %%esp;" :             \
                      "=a"(eax),                        \
                      "=b"(ebx),                        \
                      "=c"(ecx),                        \
@@ -167,10 +174,12 @@
                        port_num, magic, bp,            \
                        eax, ebx, ecx, edx, si, di)     \
 ({                                                     \
-       asm volatile ("push %%ebp;"                     \
-                     "mov %12, %%ebp;"                 \
+       asm volatile ("push %12;"                       \
+                     "push %%ebp;"                     \
+                     "mov 0x04(%%esp), %%ebp;"         \
                      "rep insb;"                       \
-                     "pop %%ebp" :                     \
+                     "pop %%ebp;"                      \
+                     "add $0x04, %%esp;" :             \
                      "=a"(eax),                        \
                      "=b"(ebx),                        \
                      "=c"(ecx),                        \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 648f8127f65a..3d667e903beb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -482,6 +482,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
                return ret;
        }
 
+       vps->dmabuf_size = size;
+
        /*
         * TTM already thinks the buffer is pinned, but make sure the
         * pin_count is upped.
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f249a4428458..6ec307c93ece 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -272,7 +272,7 @@ config SENSORS_K8TEMP
 
 config SENSORS_K10TEMP
        tristate "AMD Family 10h+ temperature sensor"
-       depends on X86 && PCI
+       depends on X86 && PCI && AMD_NB
        help
          If you say yes here you get support for the temperature
          sensor(s) inside your CPU. Supported are later revisions of
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d2cc55e21374..3b73dee6fdc6 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <asm/amd_nb.h>
 #include <asm/processor.h>
 
 MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
@@ -40,8 +41,8 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 #define PCI_DEVICE_ID_AMD_17H_DF_F3    0x1463
 #endif
 
-#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
-#define PCI_DEVICE_ID_AMD_17H_RR_NB    0x15d0
+#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3       0x15eb
 #endif
 
 /* CPUID function 0x80000001, ebx */
@@ -63,10 +64,12 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 #define NB_CAP_HTC                     0x00000400
 
 /*
- * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE
- * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature
- * Control]
+ * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
+ * and REG_REPORTED_TEMPERATURE have been moved to
+ * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
+ * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
  */
+#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET    0xd8200c64
 #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET    0xd8200ca4
 
 /* F17h M01h Access througn SMN */
@@ -74,6 +77,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 
 struct k10temp_data {
        struct pci_dev *pdev;
+       void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
        void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
        int temp_offset;
        u32 temp_adjust_mask;
@@ -98,6 +102,11 @@ static const struct tctl_offset tctl_offset_table[] = {
        { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
 };
 
+static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
+{
+       pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
+}
+
 static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
 {
        pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
@@ -114,6 +123,12 @@ static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
        mutex_unlock(&nb_smu_ind_mutex);
 }
 
+static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+{
+       amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
+                         F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
+}
+
 static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
 {
        amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
@@ -122,8 +137,8 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
 
 static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
 {
-       amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
-                         F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
+       amd_smn_read(amd_pci_dev_to_node_id(pdev),
+                    F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
 }
 
 static ssize_t temp1_input_show(struct device *dev,
@@ -160,8 +175,7 @@ static ssize_t show_temp_crit(struct device *dev,
        u32 regval;
        int value;
 
-       pci_read_config_dword(data->pdev,
-                             REG_HARDWARE_THERMAL_CONTROL, &regval);
+       data->read_htcreg(data->pdev, &regval);
        value = ((regval >> 16) & 0x7f) * 500 + 52000;
        if (show_hyst)
                value -= ((regval >> 24) & 0xf) * 500;
@@ -181,13 +195,18 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
        struct pci_dev *pdev = data->pdev;
 
        if (index >= 2) {
-               u32 reg_caps, reg_htc;
+               u32 reg;
+
+               if (!data->read_htcreg)
+                       return 0;
 
                pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
-                                     &reg_caps);
-               pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL,
-                                     &reg_htc);
-               if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE))
+                                     &reg);
+               if (!(reg & NB_CAP_HTC))
+                       return 0;
+
+               data->read_htcreg(data->pdev, &reg);
+               if (!(reg & HTC_ENABLE))
                        return 0;
        }
        return attr->mode;
@@ -268,11 +287,13 @@ static int k10temp_probe(struct pci_dev *pdev,
 
        if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
                                          boot_cpu_data.x86_model == 0x70)) {
+               data->read_htcreg = read_htcreg_nb_f15;
                data->read_tempreg = read_tempreg_nb_f15;
        } else if (boot_cpu_data.x86 == 0x17) {
                data->temp_adjust_mask = 0x80000;
                data->read_tempreg = read_tempreg_nb_f17;
        } else {
+               data->read_htcreg = read_htcreg_pci;
                data->read_tempreg = read_tempreg_pci;
        }
 
@@ -302,7 +323,7 @@ static const struct pci_device_id k10temp_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
-       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
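
The k10temp changes route all HTC access through a per-model read_htcreg() callback chosen once at probe time, and a NULL callback doubles as "HTC not accessible" in k10temp_is_visible(). The shape of that pattern, reduced to a sketch with hypothetical names:

#include <linux/pci.h>
#include <linux/errno.h>

/* Hypothetical ops struct: one accessor per register family. */
struct temp_ops_sketch {
        void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
};

static bool htc_supported(const struct temp_ops_sketch *ops)
{
        return ops->read_htcreg != NULL;        /* NULL doubles as a flag */
}

static int read_htc(const struct temp_ops_sketch *ops, struct pci_dev *pdev,
                    u32 *regval)
{
        if (!htc_supported(ops))
                return -EOPNOTSUPP;
        ops->read_htcreg(pdev, regval);
        return 0;
}
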
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index fd36c39ddf4e..0cdba29ae0a9 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -209,7 +209,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
        i2c_dw_disable_int(dev);
 
        /* Enable the adapter */
-       __i2c_dw_enable_and_wait(dev, true);
+       __i2c_dw_enable(dev, true);
+
+       /* Dummy read to avoid the register getting stuck on Bay Trail */
+       dw_readl(dev, DW_IC_ENABLE_STATUS);
 
        /* Clear and enable interrupts */
        dw_readl(dev, DW_IC_CLR_INTR);
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 2aa0e83174c5..dae8ac618a52 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
         * TODO: We could potentially loop and retry in the case
         * of MSP_TWI_XFER_TIMEOUT.
         */
-       return -1;
+       return -EIO;
    }
 
-       return 0;
+       return num;
 }
 
 static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index e4be86b3de9a..7235c7302bb7 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
                }
                mutex_unlock(&vb->lock);
        }
-       return 0;
+       return num;
 error:
        mutex_unlock(&vb->lock);
        return error;
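
Both i2c fixes above enforce the same contract: a .master_xfer implementation returns the number of messages it completed on success (i2c_transfer() passes that count back to callers), or a negative errno on failure; returning 0 or a bare -1 breaks callers that compare the result against the message count. A minimal sketch of the contract; sketch_do_one_msg() is an assumed helper, not a real adapter routine:

#include <linux/i2c.h>
#include <linux/errno.h>

static int sketch_do_one_msg(struct i2c_adapter *adap, struct i2c_msg *msg)
{
        (void)adap;
        (void)msg;
        return 0;       /* stand-in for the real per-message transfer */
}

static int sketch_master_xfer(struct i2c_adapter *adap,
                              struct i2c_msg *msgs, int num)
{
        int i;

        for (i = 0; i < num; i++) {
                if (sketch_do_one_msg(adap, &msgs[i]))
                        return -EIO;    /* negative errno, never a bare -1 */
        }
        return num;     /* success: count of messages, not 0 */
}
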
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index a9126b3cda61..7c3b4740b94b 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -445,10 +445,17 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
        msgs[1].buf = buffer;
 
        ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-       if (ret < 0)
-               dev_err(&client->adapter->dev, "i2c read failed\n");
-       else
+       if (ret < 0) {
+               /* Getting a NACK is unfortunately normal with some DSTDs */
+               if (ret == -EREMOTEIO)
+                       dev_dbg(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
+                               data_len, client->addr, cmd, ret);
+               else
+                       dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
+                               data_len, client->addr, cmd, ret);
+       } else {
                memcpy(data, buffer, data_len);
+       }
 
        kfree(buffer);
        return ret;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 1ba40bb2b966..a17f46a95f73 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -363,7 +363,7 @@ static int i2c_device_probe(struct device *dev)
                goto err_clear_wakeup_irq;
 
        status = dev_pm_domain_attach(&client->dev, true);
-       if (status == -EPROBE_DEFER)
+       if (status)
                goto err_clear_wakeup_irq;
 
        /*
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 9a4e899d94b3..2b6c9b516070 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        umem->length     = size;
        umem->address    = addr;
        umem->page_shift = PAGE_SHIFT;
-       umem->pid        = get_task_pid(current, PIDTYPE_PID);
        /*
         * We ask for writable memory if any of the following
         * access flags are set. "Local write" and "remote write"
@@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
 
        if (access & IB_ACCESS_ON_DEMAND) {
-               put_pid(umem->pid);
                ret = ib_umem_odp_get(context, umem, access);
                if (ret) {
                        kfree(umem);
@@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
-               put_pid(umem->pid);
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }
@@ -231,7 +228,6 @@ out:
        if (ret < 0) {
                if (need_release)
                        __ib_umem_release(context->device, umem, 0);
-               put_pid(umem->pid);
                kfree(umem);
        } else
                current->mm->pinned_vm = locked;
@@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem)
 
        __ib_umem_release(umem->context->device, umem, 1);
 
-       task = get_pid_task(umem->pid, PIDTYPE_PID);
-       put_pid(umem->pid);
+       task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
        if (!task)
                goto out;
        mm = get_task_mm(task);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e90f2fd8dc16..1445918e3239 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -489,10 +489,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 err_dereg_mem:
        dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
                  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
-err_free_wr_wait:
-       c4iw_put_wr_wait(mhp->wr_waitp);
 err_free_skb:
        kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+       c4iw_put_wr_wait(mhp->wr_waitp);
 err_free_mhp:
        kfree(mhp);
        return ERR_PTR(ret);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e6a60fa59f2b..e6bdd0c1e80a 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -5944,6 +5944,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
        u64 status;
        u32 sw_index;
        int i = 0;
+       unsigned long irq_flags;
 
        sw_index = dd->hw_to_sw[hw_context];
        if (sw_index >= dd->num_send_contexts) {
@@ -5953,10 +5954,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
                return;
        }
        sci = &dd->send_contexts[sw_index];
+       spin_lock_irqsave(&dd->sc_lock, irq_flags);
        sc = sci->sc;
        if (!sc) {
                dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
                           sw_index, hw_context);
+               spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
                return;
        }
 
@@ -5978,6 +5981,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
         */
        if (sc->type != SC_USER)
                queue_work(dd->pport->hfi1_wq, &sc->halt_work);
+       spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
 
        /*
         * Update the counters for the corresponding status bits.
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 14734d0d0b76..3a485f50fede 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -377,6 +377,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 
                hr_cq->set_ci_db = hr_cq->db.db_record;
                *hr_cq->set_ci_db = 0;
+               hr_cq->db_en = 1;
        }
 
        /* Init mmt table and write buff address to mtt table */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 47e1b6ac1e1a..8013d69c5ac4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -722,6 +722,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
        free_mr->mr_free_pd = to_hr_pd(pd);
        free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
        free_mr->mr_free_pd->ibpd.uobject = NULL;
+       free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
        atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
 
        attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
@@ -1036,7 +1037,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 
        do {
                ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
-               if (ret < 0) {
+               if (ret < 0 && hr_qp) {
                        dev_err(dev,
                           "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
                           hr_qp->qpn, ret, hr_mr->key, ne);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 25916e8522ed..1f0965bb64ee 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -142,8 +142,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        unsigned long flags;
        unsigned int ind;
        void *wqe = NULL;
-       u32 tmp_len = 0;
        bool loopback;
+       u32 tmp_len;
        int ret = 0;
        u8 *smac;
        int nreq;
@@ -189,6 +189,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
                owner_bit =
                       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+               tmp_len = 0;
 
                /* Corresponding to the QP type, wqe process separately */
                if (ibqp->qp_type == IB_QPT_GSI) {
@@ -547,16 +548,20 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                }
 
                if (i < hr_qp->rq.max_gs) {
-                       dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
-                       dseg[i].addr = 0;
+                       dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+                       dseg->addr = 0;
                }
 
                /* rq support inline data */
-               sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
-               hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
-               for (i = 0; i < wr->num_sge; i++) {
-                       sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
-                       sge_list[i].len = wr->sg_list[i].length;
+               if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+                       sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+                       hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
+                                                       (u32)wr->num_sge;
+                       for (i = 0; i < wr->num_sge; i++) {
+                               sge_list[i].addr =
+                                       (void *)(u64)wr->sg_list[i].addr;
+                               sge_list[i].len = wr->sg_list[i].length;
+                       }
                }
 
                hr_qp->rq.wrid[ind] = wr->wr_id;
@@ -613,6 +618,8 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                         DMA_BIDIRECTIONAL);
+
+       ring->desc_dma_addr = 0;
        kfree(ring->desc);
 }
 
@@ -1081,6 +1088,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
        if (ret) {
                dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
                        ret);
+               return ret;
        }
 
        /* Get pf resource owned by every pf */
@@ -1372,6 +1380,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
                     mr->type == MR_TYPE_MR ? 0 : 1);
+       roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
+                    1);
        mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
 
        mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
@@ -2169,6 +2179,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
                                    struct hns_roce_v2_qp_context *context,
                                    struct hns_roce_v2_qp_context *qpc_mask)
 {
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 
        /*
@@ -2281,7 +2292,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
        context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
        qpc_mask->rq_db_record_addr = 0;
 
-       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
+       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
+                   (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
        roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
 
        roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -4703,6 +4715,8 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
        {0, }
 };
 
+MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
+
 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
                                  struct hnae3_handle *handle)
 {
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 9d48bc07a9e6..96fb6a9ed93c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -199,7 +199,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
 
        memset(props, 0, sizeof(*props));
 
-       props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid);
+       props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
        props->max_mr_size = (u64)(~(0ULL));
        props->page_size_cap = hr_dev->caps.page_size_cap;
        props->vendor_id = hr_dev->vendor_id;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d4aad34c21e2..baaf906f7c2e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -660,6 +660,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                        goto err_rq_sge_list;
                }
                *hr_qp->rdb.db_record = 0;
+               hr_qp->rdb_en = 1;
        }
 
        /* Allocate QP buf */
@@ -955,7 +956,14 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        }
 
        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-               ret = 0;
+               if (hr_dev->caps.min_wqes) {
+                       ret = -EPERM;
+                       dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
+                               new_state);
+               } else {
+                       ret = 0;
+               }
+
                goto out;
        }
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index d5d8c1be345a..2f2b4426ded7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -207,6 +207,7 @@ struct i40iw_msix_vector {
        u32 irq;
        u32 cpu_affinity;
        u32 ceq_id;
+       cpumask_t mask;
 };
 
 struct l2params_work {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 4cfa8f4647e2..f7c6fd9ff6e2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2093,7 +2093,7 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
        if (netif_is_bond_slave(netdev))
                netdev = netdev_master_upper_dev_get(netdev);
 
-       neigh = dst_neigh_lookup(dst, &dst_addr);
+       neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
 
        rcu_read_lock();
        if (neigh) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 6139836fb533..c9f62ca7643c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -331,7 +331,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
                switch (info->ae_id) {
                case I40IW_AE_LLP_FIN_RECEIVED:
                        if (qp->term_flags)
-                               continue;
+                               break;
                        if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
                                iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
                                if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
@@ -360,7 +360,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
                        break;
                case I40IW_AE_LLP_CONNECTION_RESET:
                        if (atomic_read(&iwqp->close_timer_started))
-                               continue;
+                               break;
                        i40iw_cm_disconn(iwqp);
                        break;
                case I40IW_AE_QP_SUSPEND_COMPLETE:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 9cd0d3ef9057..05001e6da1f8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -687,7 +687,6 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
                                        struct i40iw_msix_vector *msix_vec)
 {
        enum i40iw_status_code status;
-       cpumask_t mask;
 
        if (iwdev->msix_shared && !ceq_id) {
                tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
@@ -697,9 +696,9 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
                status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
        }
 
-       cpumask_clear(&mask);
-       cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
-       irq_set_affinity_hint(msix_vec->irq, &mask);
+       cpumask_clear(&msix_vec->mask);
+       cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
+       irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
 
        if (status) {
                i40iw_pr_err("ceq irq config fail\n");
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 40e4f5ab2b46..68679ad4c6da 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -394,6 +394,7 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
 
        list_for_each_entry(iwpbl, pbl_list, list) {
                if (iwpbl->user_base == va) {
+                       iwpbl->on_list = false;
                        list_del(&iwpbl->list);
                        return iwpbl;
                }
@@ -614,6 +615,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
                return ERR_PTR(-ENOMEM);
 
        iwqp = (struct i40iw_qp *)mem;
+       iwqp->allocated_buffer = mem;
        qp = &iwqp->sc_qp;
        qp->back_qp = (void *)iwqp;
        qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
@@ -642,7 +644,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
                goto error;
        }
 
-       iwqp->allocated_buffer = mem;
        iwqp->iwdev = iwdev;
        iwqp->iwpd = iwpd;
        iwqp->ibqp.qp_num = qp_num;
@@ -1898,6 +1899,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
                        goto error;
                spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
                list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+               iwpbl->on_list = true;
                spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
                break;
        case IW_MEMREG_TYPE_CQ:
@@ -1908,6 +1910,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 
                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
                list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+               iwpbl->on_list = true;
                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
                break;
        case IW_MEMREG_TYPE_MEM:
@@ -2045,14 +2048,18 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
        switch (iwmr->type) {
        case IW_MEMREG_TYPE_CQ:
                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-               if (!list_empty(&ucontext->cq_reg_mem_list))
+               if (iwpbl->on_list) {
+                       iwpbl->on_list = false;
                        list_del(&iwpbl->list);
+               }
                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
                break;
        case IW_MEMREG_TYPE_QP:
                spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-               if (!list_empty(&ucontext->qp_reg_mem_list))
+               if (iwpbl->on_list) {
+                       iwpbl->on_list = false;
                        list_del(&iwpbl->list);
+               }
                spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
                break;
        default:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 9067443cd311..76cf173377ab 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -78,6 +78,7 @@ struct i40iw_pbl {
78 }; 78 };
79 79
80 bool pbl_allocated; 80 bool pbl_allocated;
81 bool on_list;
81 u64 user_base; 82 u64 user_base;
82 struct i40iw_pble_alloc pble_alloc; 83 struct i40iw_pble_alloc pble_alloc;
83 struct i40iw_mr *iwmr; 84 struct i40iw_mr *iwmr;
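The i40iw hunks above replace the "!list_empty()" checks with a per-entry on_list flag: list_empty() only says whether the whole list is empty, so two teardown paths racing over the same entry could each call list_del() on it and corrupt the list. A minimal userspace sketch of the pattern, with illustrative names (node, node_add, node_del) rather than the driver's:

/* Unlink an entry at most once, guarding the flag with the same
 * lock that protects the list itself. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node { struct node *prev, *next; bool on_list; };

static struct node head = { &head, &head, false };
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void node_add(struct node *n)
{
        pthread_mutex_lock(&lock);
        n->next = head.next;
        n->prev = &head;
        head.next->prev = n;
        head.next = n;
        n->on_list = true;
        pthread_mutex_unlock(&lock);
}

static void node_del(struct node *n)
{
        pthread_mutex_lock(&lock);
        if (n->on_list) {               /* safe against a second delete */
                n->on_list = false;
                n->prev->next = n->next;
                n->next->prev = n->prev;
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        struct node n = { .on_list = false };

        node_add(&n);
        node_del(&n);
        node_del(&n);                   /* second call is now a no-op */
        printf("on_list=%d\n", n.on_list);
        return 0;
}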
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b4d8ff8ab807..69716a7ea993 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2416,7 +2416,7 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2416 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 2416 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2417} 2417}
2418 2418
2419static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val, 2419static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
2420 bool inner) 2420 bool inner)
2421{ 2421{
2422 if (inner) { 2422 if (inner) {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 87b7c1be2a11..2193dc1765fb 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -484,11 +484,6 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
484 return 1; 484 return 1;
485} 485}
486 486
487static int first_med_bfreg(void)
488{
489 return 1;
490}
491
492enum { 487enum {
493 /* this is the first blue flame register in the array of bfregs assigned 488 /* this is the first blue flame register in the array of bfregs assigned
494 * to a processes. Since we do not use it for blue flame but rather 489 * to a processes. Since we do not use it for blue flame but rather
@@ -514,6 +509,12 @@ static int num_med_bfreg(struct mlx5_ib_dev *dev,
514 return n >= 0 ? n : 0; 509 return n >= 0 ? n : 0;
515} 510}
516 511
512static int first_med_bfreg(struct mlx5_ib_dev *dev,
513 struct mlx5_bfreg_info *bfregi)
514{
515 return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
516}
517
517static int first_hi_bfreg(struct mlx5_ib_dev *dev, 518static int first_hi_bfreg(struct mlx5_ib_dev *dev,
518 struct mlx5_bfreg_info *bfregi) 519 struct mlx5_bfreg_info *bfregi)
519{ 520{
@@ -541,10 +542,13 @@ static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
541static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev, 542static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
542 struct mlx5_bfreg_info *bfregi) 543 struct mlx5_bfreg_info *bfregi)
543{ 544{
544 int minidx = first_med_bfreg(); 545 int minidx = first_med_bfreg(dev, bfregi);
545 int i; 546 int i;
546 547
547 for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) { 548 if (minidx < 0)
549 return minidx;
550
551 for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
548 if (bfregi->count[i] < bfregi->count[minidx]) 552 if (bfregi->count[i] < bfregi->count[minidx])
549 minidx = i; 553 minidx = i;
550 if (!bfregi->count[minidx]) 554 if (!bfregi->count[minidx])
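The mlx5 change above makes first_med_bfreg() report -ENOMEM when the medium blue-flame register class is empty, instead of unconditionally returning index 1; alloc_med_class_bfreg() then propagates the error rather than scanning from a bogus fixed index. A compilable model of the control flow (the counts, NUM_MED_BFREGS, and FIRST_HI_BFREG are made up for illustration):

#include <errno.h>
#include <stdio.h>

#define NUM_MED_BFREGS 0        /* try 0 vs. 4 to exercise both paths */
#define FIRST_HI_BFREG (1 + NUM_MED_BFREGS)

static int first_med_bfreg(void)
{
        return NUM_MED_BFREGS ? 1 : -ENOMEM;
}

static int alloc_med_class_bfreg(int count[])
{
        int minidx = first_med_bfreg();
        int i;

        if (minidx < 0)
                return minidx;          /* empty class: propagate error */

        /* Otherwise pick the least-used register in the class. */
        for (i = minidx; i < FIRST_HI_BFREG; i++) {
                if (count[i] < count[minidx])
                        minidx = i;
                if (!count[minidx])
                        break;
        }
        count[minidx]++;
        return minidx;
}

int main(void)
{
        int count[8] = { 0 };

        printf("alloc -> %d\n", alloc_med_class_bfreg(count));
        return 0;
}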
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7d3763b2e01c..3f9afc02d166 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -401,49 +401,47 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
401{ 401{
402 struct qedr_ucontext *ucontext = get_qedr_ucontext(context); 402 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
403 struct qedr_dev *dev = get_qedr_dev(context->device); 403 struct qedr_dev *dev = get_qedr_dev(context->device);
404 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; 404 unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
405 u64 unmapped_db = dev->db_phys_addr;
406 unsigned long len = (vma->vm_end - vma->vm_start); 405 unsigned long len = (vma->vm_end - vma->vm_start);
407 int rc = 0; 406 unsigned long dpi_start;
408 bool found; 407
408 dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
409 409
410 DP_DEBUG(dev, QEDR_MSG_INIT, 410 DP_DEBUG(dev, QEDR_MSG_INIT,
411 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n", 411 "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
412 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len); 412 (void *)vma->vm_start, (void *)vma->vm_end,
413 if (vma->vm_start & (PAGE_SIZE - 1)) { 413 (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
414 DP_ERR(dev, "Vma_start not page aligned = %ld\n", 414
415 vma->vm_start); 415 if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
416 DP_ERR(dev,
417 "failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n",
418 (void *)vma->vm_start, (void *)vma->vm_end);
416 return -EINVAL; 419 return -EINVAL;
417 } 420 }
418 421
419 found = qedr_search_mmap(ucontext, vm_page, len); 422 if (!qedr_search_mmap(ucontext, phys_addr, len)) {
420 if (!found) { 423 DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
421 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
422 vma->vm_pgoff); 424 vma->vm_pgoff);
423 return -EINVAL; 425 return -EINVAL;
424 } 426 }
425 427
426 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); 428 if (phys_addr < dpi_start ||
427 429 ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
428 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + 430 DP_ERR(dev,
429 dev->db_size))) { 431 "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
430 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); 432 (void *)phys_addr, (void *)dpi_start,
431 if (vma->vm_flags & VM_READ) { 433 ucontext->dpi_size);
432 DP_ERR(dev, "Trying to map doorbell bar for read\n"); 434 return -EINVAL;
433 return -EPERM; 435 }
434 }
435
436 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
437 436
438 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 437 if (vma->vm_flags & VM_READ) {
439 PAGE_SIZE, vma->vm_page_prot); 438 DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
440 } else { 439 return -EINVAL;
441 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
442 rc = remap_pfn_range(vma, vma->vm_start,
443 vma->vm_pgoff, len, vma->vm_page_prot);
444 } 440 }
445 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc); 441
446 return rc; 442 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
443 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
444 vma->vm_page_prot);
447} 445}
448 446
449struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, 447struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
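The rewritten qedr_mmap() above rejects requests that are not page aligned, that were never handed out to this context, or that fall outside the context's doorbell (DPI) window, and only then write-combines and remaps the range. The pure range check distills to the following userspace sketch (the constants are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* 0 if [phys_addr, phys_addr + len) is page aligned and entirely
 * inside this context's DPI slice, -1 otherwise. */
static int check_mmap_range(uint64_t phys_addr, uint64_t len,
                            uint64_t dpi_start, uint64_t dpi_size)
{
        if ((phys_addr & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1)))
                return -1;              /* not page aligned */
        if (phys_addr < dpi_start ||
            phys_addr + len > dpi_start + dpi_size)
                return -1;              /* outside the DPI window */
        return 0;
}

int main(void)
{
        uint64_t dpi_start = 0x100000, dpi_size = 2 * PAGE_SIZE;

        printf("%d\n", check_mmap_range(0x100000, PAGE_SIZE,
                                        dpi_start, dpi_size));  /* ok  */
        printf("%d\n", check_mmap_range(0x102000, PAGE_SIZE,
                                        dpi_start, dpi_size));  /* out */
        return 0;
}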
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 2cb52fd48cf1..73a00a1c06f6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -761,7 +761,6 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
761 unsigned int mask; 761 unsigned int mask;
762 unsigned int length = 0; 762 unsigned int length = 0;
763 int i; 763 int i;
764 int must_sched;
765 764
766 while (wr) { 765 while (wr) {
767 mask = wr_opcode_mask(wr->opcode, qp); 766 mask = wr_opcode_mask(wr->opcode, qp);
@@ -791,14 +790,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
791 wr = wr->next; 790 wr = wr->next;
792 } 791 }
793 792
794 /* 793 rxe_run_task(&qp->req.task, 1);
795 * Must sched in case of GSI QP because ib_send_mad() hold irq lock,
796 * and the requester call ip_local_out_sk() that takes spin_lock_bh.
797 */
798 must_sched = (qp_type(qp) == IB_QPT_GSI) ||
799 (queue_count(qp->sq.queue) > 1);
800
801 rxe_run_task(&qp->req.task, must_sched);
802 if (unlikely(qp->req.state == QP_STATE_ERROR)) 794 if (unlikely(qp->req.state == QP_STATE_ERROR))
803 rxe_run_task(&qp->comp.task, 1); 795 rxe_run_task(&qp->comp.task, 1);
804 796
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
index fb8b7182f05e..25bf6955b6d0 100644
--- a/drivers/infiniband/ulp/srpt/Kconfig
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -1,6 +1,6 @@
1config INFINIBAND_SRPT 1config INFINIBAND_SRPT
2 tristate "InfiniBand SCSI RDMA Protocol target support" 2 tristate "InfiniBand SCSI RDMA Protocol target support"
3 depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE 3 depends on INFINIBAND_ADDR_TRANS && TARGET_CORE
4 ---help--- 4 ---help---
5 5
6 Support for the SCSI RDMA Protocol (SRP) Target driver. The 6 Support for the SCSI RDMA Protocol (SRP) Target driver. The
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index 29f99529b187..cfcb32559925 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
130 bool max_baseline, u8 *value) 130 bool max_baseline, u8 *value)
131{ 131{
132 int error; 132 int error;
133 u8 val[3]; 133 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
134 134
135 error = i2c_smbus_read_block_data(client, 135 error = i2c_smbus_read_block_data(client,
136 max_baseline ? 136 max_baseline ?
@@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
149 bool iap, u8 *version) 149 bool iap, u8 *version)
150{ 150{
151 int error; 151 int error;
152 u8 val[3]; 152 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
153 153
154 error = i2c_smbus_read_block_data(client, 154 error = i2c_smbus_read_block_data(client,
155 iap ? ETP_SMBUS_IAP_VERSION_CMD : 155 iap ? ETP_SMBUS_IAP_VERSION_CMD :
@@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
170 u8 *clickpad) 170 u8 *clickpad)
171{ 171{
172 int error; 172 int error;
173 u8 val[3]; 173 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
174 174
175 error = i2c_smbus_read_block_data(client, 175 error = i2c_smbus_read_block_data(client,
176 ETP_SMBUS_SM_VERSION_CMD, val); 176 ETP_SMBUS_SM_VERSION_CMD, val);
@@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
188static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) 188static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
189{ 189{
190 int error; 190 int error;
191 u8 val[3]; 191 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
192 192
193 error = i2c_smbus_read_block_data(client, 193 error = i2c_smbus_read_block_data(client,
194 ETP_SMBUS_UNIQUEID_CMD, val); 194 ETP_SMBUS_UNIQUEID_CMD, val);
@@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
205 bool iap, u16 *csum) 205 bool iap, u16 *csum)
206{ 206{
207 int error; 207 int error;
208 u8 val[3]; 208 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
209 209
210 error = i2c_smbus_read_block_data(client, 210 error = i2c_smbus_read_block_data(client,
211 iap ? ETP_SMBUS_FW_CHECKSUM_CMD : 211 iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
@@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
226{ 226{
227 int ret; 227 int ret;
228 int error; 228 int error;
229 u8 val[3]; 229 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
230 230
231 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val); 231 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
232 if (ret != 3) { 232 if (ret != 3) {
@@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
246{ 246{
247 int ret; 247 int ret;
248 int error; 248 int error;
249 u8 val[3]; 249 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
250 250
251 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val); 251 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
252 if (ret != 3) { 252 if (ret != 3) {
@@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
267{ 267{
268 int ret; 268 int ret;
269 int error; 269 int error;
270 u8 val[3]; 270 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
271 271
272 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val); 272 ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
273 if (ret != 3) { 273 if (ret != 3) {
@@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
294{ 294{
295 int error; 295 int error;
296 u16 constant; 296 u16 constant;
297 u8 val[3]; 297 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
298 298
299 error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val); 299 error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
300 if (error < 0) { 300 if (error < 0) {
@@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
345 int len; 345 int len;
346 int error; 346 int error;
347 enum tp_mode mode; 347 enum tp_mode mode;
348 u8 val[3]; 348 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
349 u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06}; 349 u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
350 u16 password; 350 u16 password;
351 351
@@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
419 struct device *dev = &client->dev; 419 struct device *dev = &client->dev;
420 int error; 420 int error;
421 u16 result; 421 u16 result;
422 u8 val[3]; 422 u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
423 423
424 /* 424 /*
425 * Due to the limitation of smbus protocol limiting 425 * Due to the limitation of smbus protocol limiting
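Every hunk in elan_i2c_smbus.c above is the same one-line fix: i2c_smbus_read_block_data() may store up to I2C_SMBUS_BLOCK_MAX (32) bytes into the caller's buffer, whatever length the caller expects, so a u8 val[3] on the stack could be overrun by a misbehaving device. A userspace model of the sizing rule, where fake_block_read() stands in for the real SMBus call:

#include <stdio.h>
#include <string.h>

#define I2C_SMBUS_BLOCK_MAX 32

static int fake_block_read(unsigned char *buf)
{
        /* A flaky device may legally return a full 32-byte block. */
        memset(buf, 0xab, I2C_SMBUS_BLOCK_MAX);
        return I2C_SMBUS_BLOCK_MAX;
}

int main(void)
{
        unsigned char val[I2C_SMBUS_BLOCK_MAX] = { 0 }; /* was u8 val[3] */
        int len = fake_block_read(val);

        if (len >= 3)                   /* only 3 bytes are meaningful */
                printf("%02x %02x %02x (%d returned)\n",
                       val[0], val[1], val[2], len);
        return 0;
}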
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 60f2c463d1cc..a9591d278145 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = {
172 "LEN0048", /* X1 Carbon 3 */ 172 "LEN0048", /* X1 Carbon 3 */
173 "LEN0046", /* X250 */ 173 "LEN0046", /* X250 */
174 "LEN004a", /* W541 */ 174 "LEN004a", /* W541 */
175 "LEN0071", /* T480 */
176 "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
177 "LEN0073", /* X1 Carbon G5 (Elantech) */
178 "LEN0092", /* X1 Carbon 6 */
179 "LEN0096", /* X280 */
180 "LEN0097", /* X280 -> ALPS trackpoint */
175 "LEN200f", /* T450s */ 181 "LEN200f", /* T450s */
176 NULL 182 NULL
177}; 183};
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
index 944a7f338099..1b25d8bc153a 100644
--- a/drivers/isdn/hardware/eicon/diva.c
+++ b/drivers/isdn/hardware/eicon/diva.c
@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
388** Receive and process command from user mode utility 388** Receive and process command from user mode utility
389*/ 389*/
390void *diva_xdi_open_adapter(void *os_handle, const void __user *src, 390void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
391 int length, 391 int length, void *mptr,
392 divas_xdi_copy_from_user_fn_t cp_fn) 392 divas_xdi_copy_from_user_fn_t cp_fn)
393{ 393{
394 diva_xdi_um_cfg_cmd_t msg; 394 diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
395 diva_os_xdi_adapter_t *a = NULL; 395 diva_os_xdi_adapter_t *a = NULL;
396 diva_os_spin_lock_magic_t old_irql; 396 diva_os_spin_lock_magic_t old_irql;
397 struct list_head *tmp; 397 struct list_head *tmp;
@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
401 length, sizeof(diva_xdi_um_cfg_cmd_t))) 401 length, sizeof(diva_xdi_um_cfg_cmd_t)))
402 return NULL; 402 return NULL;
403 } 403 }
404 if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) { 404 if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
405 DBG_ERR(("A: A(?) open, write error")) 405 DBG_ERR(("A: A(?) open, write error"))
406 return NULL; 406 return NULL;
407 } 407 }
408 diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter"); 408 diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
409 list_for_each(tmp, &adapter_queue) { 409 list_for_each(tmp, &adapter_queue) {
410 a = list_entry(tmp, diva_os_xdi_adapter_t, link); 410 a = list_entry(tmp, diva_os_xdi_adapter_t, link);
411 if (a->controller == (int)msg.adapter) 411 if (a->controller == (int)msg->adapter)
412 break; 412 break;
413 a = NULL; 413 a = NULL;
414 } 414 }
415 diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter"); 415 diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
416 416
417 if (!a) { 417 if (!a) {
418 DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter)) 418 DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
419 } 419 }
420 420
421 return (a); 421 return (a);
@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
437 437
438int 438int
439diva_xdi_write(void *adapter, void *os_handle, const void __user *src, 439diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
440 int length, divas_xdi_copy_from_user_fn_t cp_fn) 440 int length, void *mptr,
441 divas_xdi_copy_from_user_fn_t cp_fn)
441{ 442{
443 diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
442 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter; 444 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
443 void *data; 445 void *data;
444 446
@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
459 return (-2); 461 return (-2);
460 } 462 }
461 463
462 length = (*cp_fn) (os_handle, data, src, length); 464 if (msg) {
465 *(diva_xdi_um_cfg_cmd_t *)data = *msg;
466 length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
467 src + sizeof(*msg), length - sizeof(*msg));
468 } else {
469 length = (*cp_fn) (os_handle, data, src, length);
470 }
463 if (length > 0) { 471 if (length > 0) {
464 if ((*(a->interface.cmd_proc)) 472 if ((*(a->interface.cmd_proc))
465 (a, (diva_xdi_um_cfg_cmd_t *) data, length)) { 473 (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
index b067032093a8..1ad76650fbf9 100644
--- a/drivers/isdn/hardware/eicon/diva.h
+++ b/drivers/isdn/hardware/eicon/diva.h
@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
20 int max_length, divas_xdi_copy_to_user_fn_t cp_fn); 20 int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
21 21
22int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, 22int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
23 int length, divas_xdi_copy_from_user_fn_t cp_fn); 23 int length, void *msg,
24 divas_xdi_copy_from_user_fn_t cp_fn);
24 25
25void *diva_xdi_open_adapter(void *os_handle, const void __user *src, 26void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
26 int length, 27 int length, void *msg,
27 divas_xdi_copy_from_user_fn_t cp_fn); 28 divas_xdi_copy_from_user_fn_t cp_fn);
28 29
29void diva_xdi_close_adapter(void *adapter, void *os_handle); 30void diva_xdi_close_adapter(void *adapter, void *os_handle);
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index b9980e84f9db..b6a3950b2564 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
591static ssize_t divas_write(struct file *file, const char __user *buf, 591static ssize_t divas_write(struct file *file, const char __user *buf,
592 size_t count, loff_t *ppos) 592 size_t count, loff_t *ppos)
593{ 593{
594 diva_xdi_um_cfg_cmd_t msg;
594 int ret = -EINVAL; 595 int ret = -EINVAL;
595 596
596 if (!file->private_data) { 597 if (!file->private_data) {
597 file->private_data = diva_xdi_open_adapter(file, buf, 598 file->private_data = diva_xdi_open_adapter(file, buf,
598 count, 599 count, &msg,
599 xdi_copy_from_user); 600 xdi_copy_from_user);
600 } 601 if (!file->private_data)
601 if (!file->private_data) { 602 return (-ENODEV);
602 return (-ENODEV); 603 ret = diva_xdi_write(file->private_data, file,
604 buf, count, &msg, xdi_copy_from_user);
605 } else {
606 ret = diva_xdi_write(file->private_data, file,
607 buf, count, NULL, xdi_copy_from_user);
603 } 608 }
604 609
605 ret = diva_xdi_write(file->private_data, file,
606 buf, count, xdi_copy_from_user);
607 switch (ret) { 610 switch (ret) {
608 case -1: /* Message should be removed from rx mailbox first */ 611 case -1: /* Message should be removed from rx mailbox first */
609 ret = -EBUSY; 612 ret = -EBUSY;
@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
622static ssize_t divas_read(struct file *file, char __user *buf, 625static ssize_t divas_read(struct file *file, char __user *buf,
623 size_t count, loff_t *ppos) 626 size_t count, loff_t *ppos)
624{ 627{
628 diva_xdi_um_cfg_cmd_t msg;
625 int ret = -EINVAL; 629 int ret = -EINVAL;
626 630
627 if (!file->private_data) { 631 if (!file->private_data) {
628 file->private_data = diva_xdi_open_adapter(file, buf, 632 file->private_data = diva_xdi_open_adapter(file, buf,
629 count, 633 count, &msg,
630 xdi_copy_from_user); 634 xdi_copy_from_user);
631 } 635 }
632 if (!file->private_data) { 636 if (!file->private_data) {
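The eicon changes above close a double-fetch window: the command header used to be copied from user memory once for validation in diva_xdi_open_adapter() and again in diva_xdi_write(), so a racing user thread could swap the header between the two reads. The fix copies it once into a caller-supplied buffer and hands that buffer down. A userspace model of the copy-once pattern (struct cfg_cmd and the function names are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>

struct cfg_cmd { unsigned int adapter; unsigned int command; };

/* First fetch: validate the header and keep a trusted copy. */
static int open_adapter(const unsigned char *src, size_t len,
                        struct cfg_cmd *hdr)
{
        if (len < sizeof(*hdr))
                return -1;
        memcpy(hdr, src, sizeof(*hdr));         /* the only header fetch */
        return hdr->adapter == 1 ? 0 : -1;
}

/* Write path: reuse the saved header, fetch only the payload. */
static int do_write(const struct cfg_cmd *hdr, const unsigned char *src,
                    size_t len, unsigned char *data)
{
        memcpy(data, hdr, sizeof(*hdr));
        memcpy(data + sizeof(*hdr), src + sizeof(*hdr),
               len - sizeof(*hdr));
        return (int)len;
}

int main(void)
{
        struct cfg_cmd cmd = { .adapter = 1, .command = 7 };
        unsigned char src[16] = { 0 }, data[16];
        struct cfg_cmd hdr;

        memcpy(src, &cmd, sizeof(cmd));
        src[12] = 0xaa;                         /* payload byte */

        if (open_adapter(src, sizeof(src), &hdr) == 0)
                printf("wrote %d bytes\n",
                       do_write(&hdr, src, sizeof(src), data));
        return 0;
}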
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 4e63c6f6c04d..d030ce3025a6 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -250,7 +250,9 @@ void bch_debug_exit(void)
250 250
251int __init bch_debug_init(struct kobject *kobj) 251int __init bch_debug_init(struct kobject *kobj)
252{ 252{
253 bcache_debug = debugfs_create_dir("bcache", NULL); 253 if (!IS_ENABLED(CONFIG_DEBUG_FS))
254 return 0;
254 255
256 bcache_debug = debugfs_create_dir("bcache", NULL);
255 return IS_ERR_OR_NULL(bcache_debug); 257 return IS_ERR_OR_NULL(bcache_debug);
256} 258}
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 1b52b8557034..2060d1483043 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -419,10 +419,25 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
419 /* Verify that EC can process command */ 419 /* Verify that EC can process command */
420 for (i = 0; i < len; i++) { 420 for (i = 0; i < len; i++) {
421 rx_byte = rx_buf[i]; 421 rx_byte = rx_buf[i];
422 /*
423 * Seeing the PAST_END, RX_BAD_DATA, or NOT_READY
424 * markers is a sign that the EC didn't fully
425 * receive our command. e.g., if the EC is flashing
426 * itself, it can't respond to any commands and instead
427 * clocks out EC_SPI_PAST_END from its SPI hardware
428 * buffer. Similar occurrences can happen if the AP is
429 * too slow to clock out data after asserting CS -- the
430 * EC will abort and fill its buffer with
431 * EC_SPI_RX_BAD_DATA.
432 *
433 * In all cases, these errors should be safe to retry.
434 * Report -EAGAIN and let the caller decide what to do
435 * about that.
436 */
422 if (rx_byte == EC_SPI_PAST_END || 437 if (rx_byte == EC_SPI_PAST_END ||
423 rx_byte == EC_SPI_RX_BAD_DATA || 438 rx_byte == EC_SPI_RX_BAD_DATA ||
424 rx_byte == EC_SPI_NOT_READY) { 439 rx_byte == EC_SPI_NOT_READY) {
425 ret = -EREMOTEIO; 440 ret = -EAGAIN;
426 break; 441 break;
427 } 442 }
428 } 443 }
@@ -431,7 +446,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
431 if (!ret) 446 if (!ret)
432 ret = cros_ec_spi_receive_packet(ec_dev, 447 ret = cros_ec_spi_receive_packet(ec_dev,
433 ec_msg->insize + sizeof(*response)); 448 ec_msg->insize + sizeof(*response));
434 else 449 else if (ret != -EAGAIN)
435 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret); 450 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
436 451
437 final_ret = terminate_request(ec_dev); 452 final_ret = terminate_request(ec_dev);
@@ -537,10 +552,11 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
537 /* Verify that EC can process command */ 552 /* Verify that EC can process command */
538 for (i = 0; i < len; i++) { 553 for (i = 0; i < len; i++) {
539 rx_byte = rx_buf[i]; 554 rx_byte = rx_buf[i];
555 /* See comments in cros_ec_pkt_xfer_spi() */
540 if (rx_byte == EC_SPI_PAST_END || 556 if (rx_byte == EC_SPI_PAST_END ||
541 rx_byte == EC_SPI_RX_BAD_DATA || 557 rx_byte == EC_SPI_RX_BAD_DATA ||
542 rx_byte == EC_SPI_NOT_READY) { 558 rx_byte == EC_SPI_NOT_READY) {
543 ret = -EREMOTEIO; 559 ret = -EAGAIN;
544 break; 560 break;
545 } 561 }
546 } 562 }
@@ -549,7 +565,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
549 if (!ret) 565 if (!ret)
550 ret = cros_ec_spi_receive_response(ec_dev, 566 ret = cros_ec_spi_receive_response(ec_dev,
551 ec_msg->insize + EC_MSG_TX_PROTO_BYTES); 567 ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
552 else 568 else if (ret != -EAGAIN)
553 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret); 569 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
554 570
555 final_ret = terminate_request(ec_dev); 571 final_ret = terminate_request(ec_dev);
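Both cros_ec_spi transfer paths above now classify the EC status bytes that mean "the command never landed" as -EAGAIN, so callers can retry, and skip the error log in that case since it is an expected, transient condition. A sketch of the classification (the marker values here are placeholders, not the real protocol constants):

#include <errno.h>
#include <stdio.h>

#define EC_SPI_PAST_END    0xee
#define EC_SPI_RX_BAD_DATA 0xfb
#define EC_SPI_NOT_READY   0xfa

/* -EAGAIN if any byte shows the EC never took the command, else 0. */
static int scan_rx(const unsigned char *rx, int len)
{
        int i;

        for (i = 0; i < len; i++)
                if (rx[i] == EC_SPI_PAST_END ||
                    rx[i] == EC_SPI_RX_BAD_DATA ||
                    rx[i] == EC_SPI_NOT_READY)
                        return -EAGAIN;         /* safe to retry */
        return 0;
}

int main(void)
{
        unsigned char rx[4] = { 0x00, EC_SPI_NOT_READY, 0x00, 0x00 };
        int ret = scan_rx(rx, 4);

        if (ret && ret != -EAGAIN)
                fprintf(stderr, "spi transfer failed: %d\n", ret);
        else if (ret == -EAGAIN)
                printf("transient, retrying\n");
        return 0;
}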
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a4c9c8297a6d..918d4fb742d1 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -717,6 +717,7 @@ struct cxl {
717 bool perst_select_user; 717 bool perst_select_user;
718 bool perst_same_image; 718 bool perst_same_image;
719 bool psl_timebase_synced; 719 bool psl_timebase_synced;
720 bool tunneled_ops_supported;
720 721
721 /* 722 /*
722 * number of contexts mapped on to this card. Possible values are: 723 * number of contexts mapped on to this card. Possible values are:
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 83f1d08058fc..4d6736f9d463 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1742,6 +1742,15 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
1742 /* Required for devices using CAPP DMA mode, harmless for others */ 1742 /* Required for devices using CAPP DMA mode, harmless for others */
1743 pci_set_master(dev); 1743 pci_set_master(dev);
1744 1744
1745 adapter->tunneled_ops_supported = false;
1746
1747 if (cxl_is_power9()) {
1748 if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1))
1749 dev_info(&dev->dev, "Tunneled operations unsupported\n");
1750 else
1751 adapter->tunneled_ops_supported = true;
1752 }
1753
1745 if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) 1754 if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
1746 goto err; 1755 goto err;
1747 1756
@@ -1768,6 +1777,9 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
1768{ 1777{
1769 struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); 1778 struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
1770 1779
1780 if (cxl_is_power9())
1781 pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0);
1782
1771 cxl_native_release_psl_err_irq(adapter); 1783 cxl_native_release_psl_err_irq(adapter);
1772 cxl_unmap_adapter_regs(adapter); 1784 cxl_unmap_adapter_regs(adapter);
1773 1785
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 95285b7f636f..4b5a4c5d3c01 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -78,6 +78,15 @@ static ssize_t psl_timebase_synced_show(struct device *device,
78 return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced); 78 return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
79} 79}
80 80
81static ssize_t tunneled_ops_supported_show(struct device *device,
82 struct device_attribute *attr,
83 char *buf)
84{
85 struct cxl *adapter = to_cxl_adapter(device);
86
87 return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->tunneled_ops_supported);
88}
89
81static ssize_t reset_adapter_store(struct device *device, 90static ssize_t reset_adapter_store(struct device *device,
82 struct device_attribute *attr, 91 struct device_attribute *attr,
83 const char *buf, size_t count) 92 const char *buf, size_t count)
@@ -183,6 +192,7 @@ static struct device_attribute adapter_attrs[] = {
183 __ATTR_RO(base_image), 192 __ATTR_RO(base_image),
184 __ATTR_RO(image_loaded), 193 __ATTR_RO(image_loaded),
185 __ATTR_RO(psl_timebase_synced), 194 __ATTR_RO(psl_timebase_synced),
195 __ATTR_RO(tunneled_ops_supported),
186 __ATTR_RW(load_image_on_perst), 196 __ATTR_RW(load_image_on_perst),
187 __ATTR_RW(perst_reloads_same_image), 197 __ATTR_RW(perst_reloads_same_image),
188 __ATTR(reset, S_IWUSR, NULL, reset_adapter_store), 198 __ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 0c125f207aea..33053b0d1fdf 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -518,7 +518,7 @@ static int at24_get_pdata(struct device *dev, struct at24_platform_data *pdata)
518 if (of_node && of_match_device(at24_of_match, dev)) 518 if (of_node && of_match_device(at24_of_match, dev))
519 cdata = of_device_get_match_data(dev); 519 cdata = of_device_get_match_data(dev);
520 else if (id) 520 else if (id)
521 cdata = (void *)&id->driver_data; 521 cdata = (void *)id->driver_data;
522 else 522 else
523 cdata = acpi_device_get_match_data(dev); 523 cdata = acpi_device_get_match_data(dev);
524 524
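The at24 one-character fix above matters because id->driver_data is an unsigned long that stores a pointer value: casting the value recovers the chip-data pointer, while taking its address (the old code) yields a pointer to the id-table slot itself. A small standalone illustration (the struct names are invented):

#include <stdio.h>

struct chip_data { int byte_len; };
struct device_id { unsigned long driver_data; };

int main(void)
{
        static const struct chip_data at24c02 = { .byte_len = 256 };
        struct device_id id = { .driver_data = (unsigned long)&at24c02 };
        const struct chip_data *cdata;

        cdata = (const struct chip_data *)id.driver_data;   /* the fix */
        /* cdata = (void *)&id.driver_data; would point at the table
         * slot holding the number, not at the chip data itself. */
        printf("byte_len=%d\n", cdata->byte_len);
        return 0;
}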
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 9e923cd1d80e..38a7586b00cc 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2485,7 +2485,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
2485 break; 2485 break;
2486 } 2486 }
2487 2487
2488 return 0; 2488 return ret;
2489} 2489}
2490 2490
2491#ifdef CONFIG_COMPAT 2491#ifdef CONFIG_COMPAT
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 2b32b88949ba..b6d8203e46eb 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -139,7 +139,7 @@ static int sdio_bus_probe(struct device *dev)
139 return -ENODEV; 139 return -ENODEV;
140 140
141 ret = dev_pm_domain_attach(dev, false); 141 ret = dev_pm_domain_attach(dev, false);
142 if (ret == -EPROBE_DEFER) 142 if (ret)
143 return ret; 143 return ret;
144 144
145 /* Unbound SDIO functions are always suspended. 145 /* Unbound SDIO functions are always suspended.
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 0ef741bc515d..d0e83db42ae5 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
33 const struct sdhci_iproc_data *data; 33 const struct sdhci_iproc_data *data;
34 u32 shadow_cmd; 34 u32 shadow_cmd;
35 u32 shadow_blk; 35 u32 shadow_blk;
36 bool is_cmd_shadowed;
37 bool is_blk_shadowed;
36}; 38};
37 39
38#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) 40#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
48 50
49static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) 51static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
50{ 52{
51 u32 val = sdhci_iproc_readl(host, (reg & ~3)); 53 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
52 u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; 54 struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
55 u32 val;
56 u16 word;
57
58 if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
59 /* Get the saved transfer mode */
60 val = iproc_host->shadow_cmd;
61 } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
62 iproc_host->is_blk_shadowed) {
63 /* Get the saved block info */
64 val = iproc_host->shadow_blk;
65 } else {
66 val = sdhci_iproc_readl(host, (reg & ~3));
67 }
68 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
53 return word; 69 return word;
54} 70}
55 71
@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
105 121
106 if (reg == SDHCI_COMMAND) { 122 if (reg == SDHCI_COMMAND) {
107 /* Write the block now as we are issuing a command */ 123 /* Write the block now as we are issuing a command */
108 if (iproc_host->shadow_blk != 0) { 124 if (iproc_host->is_blk_shadowed) {
109 sdhci_iproc_writel(host, iproc_host->shadow_blk, 125 sdhci_iproc_writel(host, iproc_host->shadow_blk,
110 SDHCI_BLOCK_SIZE); 126 SDHCI_BLOCK_SIZE);
111 iproc_host->shadow_blk = 0; 127 iproc_host->is_blk_shadowed = false;
112 } 128 }
113 oldval = iproc_host->shadow_cmd; 129 oldval = iproc_host->shadow_cmd;
114 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { 130 iproc_host->is_cmd_shadowed = false;
131 } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
132 iproc_host->is_blk_shadowed) {
115 /* Block size and count are stored in shadow reg */ 133 /* Block size and count are stored in shadow reg */
116 oldval = iproc_host->shadow_blk; 134 oldval = iproc_host->shadow_blk;
117 } else { 135 } else {
@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
123 if (reg == SDHCI_TRANSFER_MODE) { 141 if (reg == SDHCI_TRANSFER_MODE) {
124 /* Save the transfer mode until the command is issued */ 142 /* Save the transfer mode until the command is issued */
125 iproc_host->shadow_cmd = newval; 143 iproc_host->shadow_cmd = newval;
144 iproc_host->is_cmd_shadowed = true;
126 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { 145 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
127 /* Save the block info until the command is issued */ 146 /* Save the block info until the command is issued */
128 iproc_host->shadow_blk = newval; 147 iproc_host->shadow_blk = newval;
148 iproc_host->is_blk_shadowed = true;
129 } else { 149 } else {
130 /* Command or other regular 32-bit write */ 150 /* Command or other regular 32-bit write */
131 sdhci_iproc_writel(host, newval, reg & ~3); 151 sdhci_iproc_writel(host, newval, reg & ~3);
@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
166 186
167static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { 187static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
168 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, 188 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
169 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, 189 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
170 .ops = &sdhci_iproc_32only_ops, 190 .ops = &sdhci_iproc_32only_ops,
171}; 191};
172 192
@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
206 .caps1 = SDHCI_DRIVER_TYPE_C | 226 .caps1 = SDHCI_DRIVER_TYPE_C |
207 SDHCI_DRIVER_TYPE_D | 227 SDHCI_DRIVER_TYPE_D |
208 SDHCI_SUPPORT_DDR50, 228 SDHCI_SUPPORT_DDR50,
209 .mmc_caps = MMC_CAP_1_8V_DDR,
210}; 229};
211 230
212static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = { 231static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index db5ec4e8bde9..ebb1d141b900 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1194,11 +1194,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
1194 NDCB0_CMD2(NAND_CMD_READSTART); 1194 NDCB0_CMD2(NAND_CMD_READSTART);
1195 1195
1196 /* 1196 /*
1197 * Trigger the naked read operation only on the last chunk. 1197 * Trigger the monolithic read on the first chunk, then naked read on
1198 * Otherwise, use monolithic read. 1198 * intermediate chunks and finally a last naked read on the last chunk.
1199 */ 1199 */
1200 if (lt->nchunks == 1 || (chunk < lt->nchunks - 1)) 1200 if (chunk == 0)
1201 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW); 1201 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
1202 else if (chunk < lt->nchunks - 1)
1203 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
1202 else 1204 else
1203 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); 1205 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
1204 1206
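The marvell_nand fix above changes the per-chunk command type for BCH reads: the first chunk issues the monolithic read (which carries the command and address cycles), intermediate chunks use naked reads, and the final chunk a last-naked read. The selection logic in isolation (the enum values are illustrative):

#include <stdio.h>

enum xtype { XTYPE_MONOLITHIC_RW, XTYPE_NAKED_RW, XTYPE_LAST_NAKED_RW };

static enum xtype chunk_xtype(int chunk, int nchunks)
{
        if (chunk == 0)
                return XTYPE_MONOLITHIC_RW;
        else if (chunk < nchunks - 1)
                return XTYPE_NAKED_RW;
        else
                return XTYPE_LAST_NAKED_RW;
}

int main(void)
{
        int nchunks = 4, chunk;

        for (chunk = 0; chunk < nchunks; chunk++)
                printf("chunk %d -> xtype %d\n", chunk,
                       chunk_xtype(chunk, nchunks));
        return 0;
}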
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 23b45da784cb..b89acaee12d4 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
354 /* Locate the first rule available */ 354 /* Locate the first rule available */
355 if (fs->location == RX_CLS_LOC_ANY) 355 if (fs->location == RX_CLS_LOC_ANY)
356 rule_index = find_first_zero_bit(priv->cfp.used, 356 rule_index = find_first_zero_bit(priv->cfp.used,
357 bcm_sf2_cfp_rule_size(priv)); 357 priv->num_cfp_rules);
358 else 358 else
359 rule_index = fs->location; 359 rule_index = fs->location;
360 360
361 if (rule_index > bcm_sf2_cfp_rule_size(priv))
362 return -ENOSPC;
363
361 layout = &udf_tcpip4_layout; 364 layout = &udf_tcpip4_layout;
362 /* We only use one UDF slice for now */ 365 /* We only use one UDF slice for now */
363 slice_num = bcm_sf2_get_slice_number(layout, 0); 366 slice_num = bcm_sf2_get_slice_number(layout, 0);
@@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
562 * first half because the HW search is by incrementing addresses. 565 * first half because the HW search is by incrementing addresses.
563 */ 566 */
564 if (fs->location == RX_CLS_LOC_ANY) 567 if (fs->location == RX_CLS_LOC_ANY)
565 rule_index[0] = find_first_zero_bit(priv->cfp.used, 568 rule_index[1] = find_first_zero_bit(priv->cfp.used,
566 bcm_sf2_cfp_rule_size(priv)); 569 priv->num_cfp_rules);
567 else 570 else
568 rule_index[0] = fs->location; 571 rule_index[1] = fs->location;
572 if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
573 return -ENOSPC;
569 574
570 /* Flag it as used (cleared on error path) such that we can immediately 575 /* Flag it as used (cleared on error path) such that we can immediately
571 * obtain a second one to chain from. 576 * obtain a second one to chain from.
572 */ 577 */
573 set_bit(rule_index[0], priv->cfp.used); 578 set_bit(rule_index[1], priv->cfp.used);
574 579
575 rule_index[1] = find_first_zero_bit(priv->cfp.used, 580 rule_index[0] = find_first_zero_bit(priv->cfp.used,
576 bcm_sf2_cfp_rule_size(priv)); 581 priv->num_cfp_rules);
577 if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) { 582 if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
578 ret = -ENOSPC; 583 ret = -ENOSPC;
579 goto out_err; 584 goto out_err;
580 } 585 }
@@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
712 /* Flag the second half rule as being used now, return it as the 717 /* Flag the second half rule as being used now, return it as the
713 * location, and flag it as unique while dumping rules 718 * location, and flag it as unique while dumping rules
714 */ 719 */
715 set_bit(rule_index[1], priv->cfp.used); 720 set_bit(rule_index[0], priv->cfp.used);
716 set_bit(rule_index[1], priv->cfp.unique); 721 set_bit(rule_index[1], priv->cfp.unique);
717 fs->location = rule_index[1]; 722 fs->location = rule_index[1];
718 723
719 return ret; 724 return ret;
720 725
721out_err: 726out_err:
722 clear_bit(rule_index[0], priv->cfp.used); 727 clear_bit(rule_index[1], priv->cfp.used);
723 return ret; 728 return ret;
724} 729}
725 730
@@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
785 int ret; 790 int ret;
786 u32 reg; 791 u32 reg;
787 792
788 /* Refuse deletion of unused rules, and the default reserved rule */
789 if (!test_bit(loc, priv->cfp.used) || loc == 0)
790 return -EINVAL;
791
792 /* Indicate which rule we want to read */ 793 /* Indicate which rule we want to read */
793 bcm_sf2_cfp_rule_addr_set(priv, loc); 794 bcm_sf2_cfp_rule_addr_set(priv, loc);
794 795
@@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
826 u32 next_loc = 0; 827 u32 next_loc = 0;
827 int ret; 828 int ret;
828 829
830 /* Refuse deleting unused rules, and those that are not unique since
831 * that could leave IPv6 rules with one of the chained rule in the
832 * table.
833 */
834 if (!test_bit(loc, priv->cfp.unique) || loc == 0)
835 return -EINVAL;
836
829 ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc); 837 ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
830 if (ret) 838 if (ret)
831 return ret; 839 return ret;
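The bcm_sf2 hunks above search the bitmap over priv->num_cfp_rules and then bound-check the result, because find_first_zero_bit() returns the bitmap size when nothing is free; they also key deletion on the unique bitmap so half of a chained IPv6 rule pair cannot be removed alone. A simplified model of the allocation side, with a byte array standing in for the kernel bitmap:

#include <stdio.h>

#define NUM_RULES 4

static unsigned char used[NUM_RULES];

static int find_first_zero(void)
{
        int i;

        for (i = 0; i < NUM_RULES; i++)
                if (!used[i])
                        return i;
        return NUM_RULES;       /* "not found", like the kernel API */
}

static int alloc_rule(void)
{
        int idx = find_first_zero();

        if (idx >= NUM_RULES)
                return -1;              /* -ENOSPC in the driver */
        used[idx] = 1;
        return idx;
}

int main(void)
{
        int i;

        for (i = 0; i < NUM_RULES + 1; i++)
                printf("alloc -> %d\n", alloc_rule());
        return 0;
}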
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 36c8950dbd2d..176861bd2252 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1212 vp->mii.reg_num_mask = 0x1f; 1212 vp->mii.reg_num_mask = 0x1f;
1213 1213
1214 /* Makes sure rings are at least 16 byte aligned. */ 1214 /* Makes sure rings are at least 16 byte aligned. */
1215 vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE 1215 vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
1216 + sizeof(struct boom_tx_desc) * TX_RING_SIZE, 1216 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1217 &vp->rx_ring_dma); 1217 &vp->rx_ring_dma, GFP_KERNEL);
1218 retval = -ENOMEM; 1218 retval = -ENOMEM;
1219 if (!vp->rx_ring) 1219 if (!vp->rx_ring)
1220 goto free_device; 1220 goto free_device;
@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1476 return 0; 1476 return 0;
1477 1477
1478free_ring: 1478free_ring:
1479 pci_free_consistent(pdev, 1479 dma_free_coherent(&pdev->dev,
1480 sizeof(struct boom_rx_desc) * RX_RING_SIZE 1480 sizeof(struct boom_rx_desc) * RX_RING_SIZE +
1481 + sizeof(struct boom_tx_desc) * TX_RING_SIZE, 1481 sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1482 vp->rx_ring, 1482 vp->rx_ring, vp->rx_ring_dma);
1483 vp->rx_ring_dma);
1484free_device: 1483free_device:
1485 free_netdev(dev); 1484 free_netdev(dev);
1486 pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); 1485 pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
1751 break; /* Bad news! */ 1750 break; /* Bad news! */
1752 1751
1753 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ 1752 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
1754 dma = pci_map_single(VORTEX_PCI(vp), skb->data, 1753 dma = dma_map_single(vp->gendev, skb->data,
1755 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 1754 PKT_BUF_SZ, DMA_FROM_DEVICE);
1756 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma)) 1755 if (dma_mapping_error(vp->gendev, dma))
1757 break; 1756 break;
1758 vp->rx_ring[i].addr = cpu_to_le32(dma); 1757 vp->rx_ring[i].addr = cpu_to_le32(dma);
1759 } 1758 }
@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2067 if (vp->bus_master) { 2066 if (vp->bus_master) {
2068 /* Set the bus-master controller to transfer the packet. */ 2067 /* Set the bus-master controller to transfer the packet. */
2069 int len = (skb->len + 3) & ~3; 2068 int len = (skb->len + 3) & ~3;
2070 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, 2069 vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
2071 PCI_DMA_TODEVICE); 2070 DMA_TO_DEVICE);
2072 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) { 2071 if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
2073 dev_kfree_skb_any(skb); 2072 dev_kfree_skb_any(skb);
2074 dev->stats.tx_dropped++; 2073 dev->stats.tx_dropped++;
2075 return NETDEV_TX_OK; 2074 return NETDEV_TX_OK;
@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2168 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); 2167 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
2169 2168
2170 if (!skb_shinfo(skb)->nr_frags) { 2169 if (!skb_shinfo(skb)->nr_frags) {
2171 dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, 2170 dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
2172 PCI_DMA_TODEVICE); 2171 DMA_TO_DEVICE);
2173 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) 2172 if (dma_mapping_error(vp->gendev, dma_addr))
2174 goto out_dma_err; 2173 goto out_dma_err;
2175 2174
2176 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); 2175 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2178 } else { 2177 } else {
2179 int i; 2178 int i;
2180 2179
2181 dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, 2180 dma_addr = dma_map_single(vp->gendev, skb->data,
2182 skb_headlen(skb), PCI_DMA_TODEVICE); 2181 skb_headlen(skb), DMA_TO_DEVICE);
2183 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) 2182 if (dma_mapping_error(vp->gendev, dma_addr))
2184 goto out_dma_err; 2183 goto out_dma_err;
2185 2184
2186 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); 2185 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2189 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2188 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2190 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2189 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2191 2190
2192 dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, 2191 dma_addr = skb_frag_dma_map(vp->gendev, frag,
2193 0, 2192 0,
2194 frag->size, 2193 frag->size,
2195 DMA_TO_DEVICE); 2194 DMA_TO_DEVICE);
2196 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { 2195 if (dma_mapping_error(vp->gendev, dma_addr)) {
2197 for(i = i-1; i >= 0; i--) 2196 for(i = i-1; i >= 0; i--)
2198 dma_unmap_page(&VORTEX_PCI(vp)->dev, 2197 dma_unmap_page(vp->gendev,
2199 le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), 2198 le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
2200 le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), 2199 le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
2201 DMA_TO_DEVICE); 2200 DMA_TO_DEVICE);
2202 2201
2203 pci_unmap_single(VORTEX_PCI(vp), 2202 dma_unmap_single(vp->gendev,
2204 le32_to_cpu(vp->tx_ring[entry].frag[0].addr), 2203 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2205 le32_to_cpu(vp->tx_ring[entry].frag[0].length), 2204 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
2206 PCI_DMA_TODEVICE); 2205 DMA_TO_DEVICE);
2207 2206
2208 goto out_dma_err; 2207 goto out_dma_err;
2209 } 2208 }
@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2218 } 2217 }
2219 } 2218 }
2220#else 2219#else
2221 dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE); 2220 dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
2222 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) 2221 if (dma_mapping_error(vp->gendev, dma_addr))
2223 goto out_dma_err; 2222 goto out_dma_err;
2224 vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); 2223 vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
2225 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); 2224 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2254out: 2253out:
2255 return NETDEV_TX_OK; 2254 return NETDEV_TX_OK;
2256out_dma_err: 2255out_dma_err:
2257 dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); 2256 dev_err(vp->gendev, "Error mapping dma buffer\n");
2258 goto out; 2257 goto out;
2259} 2258}
2260 2259
@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
2322 if (status & DMADone) { 2321 if (status & DMADone) {
2323 if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { 2322 if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
2324 iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ 2323 iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
2325 pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); 2324 dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
2326 pkts_compl++; 2325 pkts_compl++;
2327 bytes_compl += vp->tx_skb->len; 2326 bytes_compl += vp->tx_skb->len;
2328 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ 2327 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
2459 struct sk_buff *skb = vp->tx_skbuff[entry]; 2458 struct sk_buff *skb = vp->tx_skbuff[entry];
2460#if DO_ZEROCOPY 2459#if DO_ZEROCOPY
2461 int i; 2460 int i;
2462 pci_unmap_single(VORTEX_PCI(vp), 2461 dma_unmap_single(vp->gendev,
2463 le32_to_cpu(vp->tx_ring[entry].frag[0].addr), 2462 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2464 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF, 2463 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
2465 PCI_DMA_TODEVICE); 2464 DMA_TO_DEVICE);
2466 2465
2467 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) 2466 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
2468 pci_unmap_page(VORTEX_PCI(vp), 2467 dma_unmap_page(vp->gendev,
2469 le32_to_cpu(vp->tx_ring[entry].frag[i].addr), 2468 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
2470 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, 2469 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
2471 PCI_DMA_TODEVICE); 2470 DMA_TO_DEVICE);
2472#else 2471#else
2473 pci_unmap_single(VORTEX_PCI(vp), 2472 dma_unmap_single(vp->gendev,
2474 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); 2473 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
2475#endif 2474#endif
2476 pkts_compl++; 2475 pkts_compl++;
2477 bytes_compl += skb->len; 2476 bytes_compl += skb->len;
@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
2561 /* 'skb_put()' points to the start of sk_buff data area. */ 2560 /* 'skb_put()' points to the start of sk_buff data area. */
2562 if (vp->bus_master && 2561 if (vp->bus_master &&
2563 ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) { 2562 ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
2564 dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), 2563 dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
2565 pkt_len, PCI_DMA_FROMDEVICE); 2564 pkt_len, DMA_FROM_DEVICE);
2566 iowrite32(dma, ioaddr + Wn7_MasterAddr); 2565 iowrite32(dma, ioaddr + Wn7_MasterAddr);
2567 iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); 2566 iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
2568 iowrite16(StartDMAUp, ioaddr + EL3_CMD); 2567 iowrite16(StartDMAUp, ioaddr + EL3_CMD);
2569 while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000) 2568 while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
2570 ; 2569 ;
2571 pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); 2570 dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
2572 } else { 2571 } else {
2573 ioread32_rep(ioaddr + RX_FIFO, 2572 ioread32_rep(ioaddr + RX_FIFO,
2574 skb_put(skb, pkt_len), 2573 skb_put(skb, pkt_len),
@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
2635 if (pkt_len < rx_copybreak && 2634 if (pkt_len < rx_copybreak &&
2636 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { 2635 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
2637 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2636 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2638 pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2637 dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2639 /* 'skb_put()' points to the start of sk_buff data area. */ 2638 /* 'skb_put()' points to the start of sk_buff data area. */
2640 skb_put_data(skb, vp->rx_skbuff[entry]->data, 2639 skb_put_data(skb, vp->rx_skbuff[entry]->data,
2641 pkt_len); 2640 pkt_len);
2642 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2641 dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2643 vp->rx_copy++; 2642 vp->rx_copy++;
2644 } else { 2643 } else {
2645 /* Pre-allocate the replacement skb. If it or its 2644 /* Pre-allocate the replacement skb. If it or its
@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
2651 dev->stats.rx_dropped++; 2650 dev->stats.rx_dropped++;
2652 goto clear_complete; 2651 goto clear_complete;
2653 } 2652 }
2654 newdma = pci_map_single(VORTEX_PCI(vp), newskb->data, 2653 newdma = dma_map_single(vp->gendev, newskb->data,
2655 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2654 PKT_BUF_SZ, DMA_FROM_DEVICE);
2656 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) { 2655 if (dma_mapping_error(vp->gendev, newdma)) {
2657 dev->stats.rx_dropped++; 2656 dev->stats.rx_dropped++;
2658 consume_skb(newskb); 2657 consume_skb(newskb);
2659 goto clear_complete; 2658 goto clear_complete;
@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
2664 vp->rx_skbuff[entry] = newskb; 2663 vp->rx_skbuff[entry] = newskb;
2665 vp->rx_ring[entry].addr = cpu_to_le32(newdma); 2664 vp->rx_ring[entry].addr = cpu_to_le32(newdma);
2666 skb_put(skb, pkt_len); 2665 skb_put(skb, pkt_len);
2667 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2666 dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2668 vp->rx_nocopy++; 2667 vp->rx_nocopy++;
2669 } 2668 }
2670 skb->protocol = eth_type_trans(skb, dev); 2669 skb->protocol = eth_type_trans(skb, dev);
@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
2761 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ 2760 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
2762 for (i = 0; i < RX_RING_SIZE; i++) 2761 for (i = 0; i < RX_RING_SIZE; i++)
2763 if (vp->rx_skbuff[i]) { 2762 if (vp->rx_skbuff[i]) {
2764 pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), 2763 dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
2765 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2764 PKT_BUF_SZ, DMA_FROM_DEVICE);
2766 dev_kfree_skb(vp->rx_skbuff[i]); 2765 dev_kfree_skb(vp->rx_skbuff[i]);
2767 vp->rx_skbuff[i] = NULL; 2766 vp->rx_skbuff[i] = NULL;
2768 } 2767 }
@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
2775 int k; 2774 int k;
2776 2775
2777 for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) 2776 for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
2778 pci_unmap_single(VORTEX_PCI(vp), 2777 dma_unmap_single(vp->gendev,
2779 le32_to_cpu(vp->tx_ring[i].frag[k].addr), 2778 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
2780 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, 2779 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
2781 PCI_DMA_TODEVICE); 2780 DMA_TO_DEVICE);
2782#else 2781#else
2783 pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); 2782 dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
2784#endif 2783#endif
2785 dev_kfree_skb(skb); 2784 dev_kfree_skb(skb);
2786 vp->tx_skbuff[i] = NULL; 2785 vp->tx_skbuff[i] = NULL;
@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
3288 3287
3289 pci_iounmap(pdev, vp->ioaddr); 3288 pci_iounmap(pdev, vp->ioaddr);
3290 3289
3291 pci_free_consistent(pdev, 3290 dma_free_coherent(&pdev->dev,
3292 sizeof(struct boom_rx_desc) * RX_RING_SIZE 3291 sizeof(struct boom_rx_desc) * RX_RING_SIZE +
3293 + sizeof(struct boom_tx_desc) * TX_RING_SIZE, 3292 sizeof(struct boom_tx_desc) * TX_RING_SIZE,
3294 vp->rx_ring, 3293 vp->rx_ring, vp->rx_ring_dma);
3295 vp->rx_ring_dma);
3296 3294
3297 pci_release_regions(pdev); 3295 pci_release_regions(pdev);
3298 3296
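The 3c59x hunks above are a mechanical conversion from the legacy pci_* DMA wrappers to the generic DMA API keyed off a struct device (vp->gendev), so the same mapping code serves both the PCI and EISA variants of the board. As a summary (not a complete list), the correspondence used throughout the conversion is:

/*
 * Legacy wrapper               ->  generic DMA API used above
 * pci_alloc_consistent()       ->  dma_alloc_coherent(dev, sz, &h, GFP_KERNEL)
 * pci_free_consistent()        ->  dma_free_coherent(dev, sz, cpu, h)
 * pci_map_single()             ->  dma_map_single(dev, ptr, sz, dir)
 * pci_unmap_single()           ->  dma_unmap_single(dev, h, sz, dir)
 * pci_dma_sync_single_for_cpu()    ->  dma_sync_single_for_cpu()
 * pci_dma_sync_single_for_device() ->  dma_sync_single_for_device()
 * PCI_DMA_TODEVICE / PCI_DMA_FROMDEVICE -> DMA_TO_DEVICE / DMA_FROM_DEVICE
 */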
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index ac99d089ac72..1c97e39b478e 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -164,7 +164,9 @@ bad_clone_list[] __initdata = {
164#define NESM_START_PG 0x40 /* First page of TX buffer */ 164#define NESM_START_PG 0x40 /* First page of TX buffer */
165#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ 165#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
166 166
167#if defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ 167#if defined(CONFIG_MACH_TX49XX)
168# define DCR_VAL 0x48 /* 8-bit mode */
169#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
168# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49) 170# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49)
169#else 171#else
170# define DCR_VAL 0x49 172# define DCR_VAL 0x49
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index a561705f232c..be198cc0b10c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1552 if (!ioaddr) { 1552 if (!ioaddr) {
1553 if (pcnet32_debug & NETIF_MSG_PROBE) 1553 if (pcnet32_debug & NETIF_MSG_PROBE)
1554 pr_err("card has no PCI IO resources, aborting\n"); 1554 pr_err("card has no PCI IO resources, aborting\n");
1555 return -ENODEV; 1555 err = -ENODEV;
1556 goto err_disable_dev;
1556 } 1557 }
1557 1558
1558 err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); 1559 err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
1559 if (err) { 1560 if (err) {
1560 if (pcnet32_debug & NETIF_MSG_PROBE) 1561 if (pcnet32_debug & NETIF_MSG_PROBE)
1561 pr_err("architecture does not support 32bit PCI busmaster DMA\n"); 1562 pr_err("architecture does not support 32bit PCI busmaster DMA\n");
1562 return err; 1563 goto err_disable_dev;
1563 } 1564 }
1564 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { 1565 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
1565 if (pcnet32_debug & NETIF_MSG_PROBE) 1566 if (pcnet32_debug & NETIF_MSG_PROBE)
1566 pr_err("io address range already allocated\n"); 1567 pr_err("io address range already allocated\n");
1567 return -EBUSY; 1568 err = -EBUSY;
1569 goto err_disable_dev;
1568 } 1570 }
1569 1571
1570 err = pcnet32_probe1(ioaddr, 1, pdev); 1572 err = pcnet32_probe1(ioaddr, 1, pdev);
1573
1574err_disable_dev:
1571 if (err < 0) 1575 if (err < 0)
1572 pci_disable_device(pdev); 1576 pci_disable_device(pdev);
1573 1577
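
The pcnet32 change above converts early returns in the probe path into jumps to a shared exit label so that pci_disable_device() runs on every failure, not only after pcnet32_probe1(). A minimal sketch of that single-exit unwind idiom; the my_*() helpers are placeholders, not pcnet32 functions:

#include <linux/pci.h>

/* Hypothetical helpers standing in for the driver's real setup steps. */
static int my_request_resources(struct pci_dev *pdev) { return 0; }
static void my_release_resources(struct pci_dev *pdev) { }
static int my_hw_init(struct pci_dev *pdev) { return 0; }

static int example_probe(struct pci_dev *pdev)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;     /* nothing acquired yet, plain return is fine */

        err = my_request_resources(pdev);
        if (err)
                goto err_disable_dev;

        err = my_hw_init(pdev);
        if (err)
                goto err_release;

        return 0;

err_release:
        my_release_resources(pdev);
err_disable_dev:
        pci_disable_device(pdev);       /* the step the old early returns skipped */
        return err;
}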
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index b57acb8dc35b..dc25066c59a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
419 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ 419 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
420 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ 420 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
421 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ 421 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
422 {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ 422 {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
423 {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ 423 {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
424 {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ 424 {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
425 {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ 425 {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
426 {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ 426 {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
427 {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */ 427 {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
428 {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */ 428 {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
429 {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ 429 {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
430 {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ 430 {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
431}; 431};
432 432
433static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { 433static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
@@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
444 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ 444 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
445 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ 445 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
446 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ 446 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
447 {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
448 {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
449 {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
450 {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
451 {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
452 {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
453 {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
454 {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
455 {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
456 {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
457}; 447};
458 448
459static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { 449static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index db92f1858060..b76447baccaf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap,
836{ 836{
837 struct tp_params *tp = &adap->params.tp; 837 struct tp_params *tp = &adap->params.tp;
838 u64 hash_filter_mask = tp->hash_filter_mask; 838 u64 hash_filter_mask = tp->hash_filter_mask;
839 u32 mask; 839 u64 ntuple_mask = 0;
840 840
841 if (!is_hashfilter(adap)) 841 if (!is_hashfilter(adap))
842 return false; 842 return false;
@@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap,
865 if (!fs->val.fport || fs->mask.fport != 0xffff) 865 if (!fs->val.fport || fs->mask.fport != 0xffff)
866 return false; 866 return false;
867 867
868 if (tp->fcoe_shift >= 0) { 868 /* calculate tuple mask and compare with mask configured in hw */
869 mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W; 869 if (tp->fcoe_shift >= 0)
870 if (mask && !fs->mask.fcoe) 870 ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
871 return false;
872 }
873 871
874 if (tp->port_shift >= 0) { 872 if (tp->port_shift >= 0)
875 mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W; 873 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
876 if (mask && !fs->mask.iport)
877 return false;
878 }
879 874
880 if (tp->vnic_shift >= 0) { 875 if (tp->vnic_shift >= 0) {
881 mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W; 876 if ((adap->params.tp.ingress_config & VNIC_F))
882 877 ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
883 if ((adap->params.tp.ingress_config & VNIC_F)) { 878 else
884 if (mask && !fs->mask.pfvf_vld) 879 ntuple_mask |= (u64)fs->mask.ovlan_vld <<
885 return false; 880 tp->vnic_shift;
886 } else {
887 if (mask && !fs->mask.ovlan_vld)
888 return false;
889 }
890 } 881 }
891 882
892 if (tp->vlan_shift >= 0) { 883 if (tp->vlan_shift >= 0)
893 mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W; 884 ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
894 if (mask && !fs->mask.ivlan)
895 return false;
896 }
897 885
898 if (tp->tos_shift >= 0) { 886 if (tp->tos_shift >= 0)
899 mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W; 887 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
900 if (mask && !fs->mask.tos)
901 return false;
902 }
903 888
904 if (tp->protocol_shift >= 0) { 889 if (tp->protocol_shift >= 0)
905 mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W; 890 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
906 if (mask && !fs->mask.proto)
907 return false;
908 }
909 891
910 if (tp->ethertype_shift >= 0) { 892 if (tp->ethertype_shift >= 0)
911 mask = (hash_filter_mask >> tp->ethertype_shift) & 893 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
912 FT_ETHERTYPE_W;
913 if (mask && !fs->mask.ethtype)
914 return false;
915 }
916 894
917 if (tp->macmatch_shift >= 0) { 895 if (tp->macmatch_shift >= 0)
918 mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W; 896 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
919 if (mask && !fs->mask.macidx) 897
920 return false; 898 if (tp->matchtype_shift >= 0)
921 } 899 ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
900
901 if (tp->frag_shift >= 0)
902 ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
903
904 if (ntuple_mask != hash_filter_mask)
905 return false;
922 906
923 if (tp->matchtype_shift >= 0) {
924 mask = (hash_filter_mask >> tp->matchtype_shift) &
925 FT_MPSHITTYPE_W;
926 if (mask && !fs->mask.matchtype)
927 return false;
928 }
929 if (tp->frag_shift >= 0) {
930 mask = (hash_filter_mask >> tp->frag_shift) &
931 FT_FRAGMENTATION_W;
932 if (mask && !fs->mask.frag)
933 return false;
934 }
935 return true; 907 return true;
936} 908}
937 909
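
The rewritten is_filter_exact_match() above stops testing each field against the hardware mask separately and instead composes the filter's own ntuple mask with the same shifts, then requires exact equality with hash_filter_mask. A reduced model of that "compose then compare" shape, with two fields and illustrative types:

#include <linux/types.h>

struct toy_spec {
        u64 proto_mask;
        u64 port_mask;
};

static bool toy_exact_match(const struct toy_spec *fs,
                            int proto_shift, int port_shift,
                            u64 hw_mask)
{
        u64 ntuple_mask = 0;

        if (proto_shift >= 0)
                ntuple_mask |= fs->proto_mask << proto_shift;
        if (port_shift >= 0)
                ntuple_mask |= fs->port_mask << port_shift;

        /* Exact match only if the filter masks every bit hw hashes on. */
        return ntuple_mask == hw_mask;
}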
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 81684acf52af..8a8b12b720ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2747 pci_set_master(pdev); 2747 pci_set_master(pdev);
2748 2748
2749 /* Query PCI controller on system for DMA addressing 2749 /* Query PCI controller on system for DMA addressing
2750 * limitation for the device. Try 64-bit first, and 2750 * limitation for the device. Try 47-bit first, and
2751 * fail to 32-bit. 2751 * fail to 32-bit.
2752 */ 2752 */
2753 2753
2754 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2754 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
2755 if (err) { 2755 if (err) {
2756 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2756 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2757 if (err) { 2757 if (err) {
@@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2765 goto err_out_release_regions; 2765 goto err_out_release_regions;
2766 } 2766 }
2767 } else { 2767 } else {
2768 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2768 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
2769 if (err) { 2769 if (err) {
2770 dev_err(dev, "Unable to obtain %u-bit DMA " 2770 dev_err(dev, "Unable to obtain %u-bit DMA "
2771 "for consistent allocations, aborting\n", 64); 2771 "for consistent allocations, aborting\n", 47);
2772 goto err_out_release_regions; 2772 goto err_out_release_regions;
2773 } 2773 }
2774 using_dac = 1; 2774 using_dac = 1;
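
The enic hunks cap both the streaming and coherent DMA masks at 47 bits, falling back to 32. A sketch of the same fallback using the combined dma_set_mask_and_coherent() helper rather than the two legacy pci_*() calls shown in the hunk:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
        if (!err)
                return 0;       /* 47-bit addressing available */

        /* Fall back to 32-bit addressing (bounce buffering may apply). */
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}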
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4604bc8eb5b..9d3eed46830d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. 3 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) 4 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f81439796ac7..43d973215040 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -1,20 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Fast Ethernet Controller (ENET) PTP driver for MX6x. 3 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
3 * 4 *
4 * Copyright (C) 2012 Freescale Semiconductor, Inc. 5 * Copyright (C) 2012 Freescale Semiconductor, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */ 6 */
19 7
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e8d6a6f6aaf..5ec1185808e5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -192,6 +192,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
192 if (adapter->fw_done_rc) { 192 if (adapter->fw_done_rc) {
193 dev_err(dev, "Couldn't map long term buffer,rc = %d\n", 193 dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
194 adapter->fw_done_rc); 194 adapter->fw_done_rc);
195 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
195 return -1; 196 return -1;
196 } 197 }
197 return 0; 198 return 0;
@@ -795,9 +796,11 @@ static int ibmvnic_login(struct net_device *netdev)
795 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 796 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
796 unsigned long timeout = msecs_to_jiffies(30000); 797 unsigned long timeout = msecs_to_jiffies(30000);
797 int retry_count = 0; 798 int retry_count = 0;
799 bool retry;
798 int rc; 800 int rc;
799 801
800 do { 802 do {
803 retry = false;
801 if (retry_count > IBMVNIC_MAX_QUEUES) { 804 if (retry_count > IBMVNIC_MAX_QUEUES) {
802 netdev_warn(netdev, "Login attempts exceeded\n"); 805 netdev_warn(netdev, "Login attempts exceeded\n");
803 return -1; 806 return -1;
@@ -821,6 +824,9 @@ static int ibmvnic_login(struct net_device *netdev)
821 retry_count++; 824 retry_count++;
822 release_sub_crqs(adapter, 1); 825 release_sub_crqs(adapter, 1);
823 826
827 retry = true;
828 netdev_dbg(netdev,
829 "Received partial success, retrying...\n");
824 adapter->init_done_rc = 0; 830 adapter->init_done_rc = 0;
825 reinit_completion(&adapter->init_done); 831 reinit_completion(&adapter->init_done);
826 send_cap_queries(adapter); 832 send_cap_queries(adapter);
@@ -848,7 +854,7 @@ static int ibmvnic_login(struct net_device *netdev)
848 netdev_warn(netdev, "Adapter login failed\n"); 854 netdev_warn(netdev, "Adapter login failed\n");
849 return -1; 855 return -1;
850 } 856 }
851 } while (adapter->init_done_rc == PARTIALSUCCESS); 857 } while (retry);
852 858
853 /* handle pending MAC address changes after successful login */ 859 /* handle pending MAC address changes after successful login */
854 if (adapter->mac_change_pending) { 860 if (adapter->mac_change_pending) {
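
The ibmvnic login loop above now keys its do/while on an explicit retry flag, set only when a partial success is seen, rather than re-reading init_done_rc, which the retry path itself resets before looping. A generic sketch of the pattern; the names and status enum are illustrative, not the driver's:

#include <linux/types.h>

#define MAX_RETRIES     5

enum toy_status { TOY_OK, TOY_PARTIAL, TOY_FAIL };

static enum toy_status toy_try_login(void)
{
        return TOY_OK;  /* stand-in for the real CRQ exchange */
}

static int example_login(void)
{
        int retry_count = 0;
        bool retry;

        do {
                retry = false;
                if (retry_count++ > MAX_RETRIES)
                        return -1;

                if (toy_try_login() == TOY_PARTIAL) {
                        /*
                         * Renegotiate and loop again.  A dedicated flag
                         * drives the loop, so status fields the retry path
                         * itself rewrites cannot end it early.
                         */
                        retry = true;
                }
        } while (retry);

        return 0;
}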
@@ -1821,9 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1821 if (rc) 1827 if (rc)
1822 return rc; 1828 return rc;
1823 } 1829 }
1830 ibmvnic_disable_irqs(adapter);
1824 } 1831 }
1825
1826 ibmvnic_disable_irqs(adapter);
1827 adapter->state = VNIC_CLOSED; 1832 adapter->state = VNIC_CLOSED;
1828 1833
1829 if (reset_state == VNIC_CLOSED) 1834 if (reset_state == VNIC_CLOSED)
@@ -2617,18 +2622,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2617{ 2622{
2618 struct device *dev = &adapter->vdev->dev; 2623 struct device *dev = &adapter->vdev->dev;
2619 unsigned long rc; 2624 unsigned long rc;
2620 u64 val;
2621 2625
2622 if (scrq->hw_irq > 0x100000000ULL) { 2626 if (scrq->hw_irq > 0x100000000ULL) {
2623 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2627 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2624 return 1; 2628 return 1;
2625 } 2629 }
2626 2630
2627 val = (0xff000000) | scrq->hw_irq; 2631 if (adapter->resetting &&
2628 rc = plpar_hcall_norets(H_EOI, val); 2632 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2629 if (rc) 2633 u64 val = (0xff000000) | scrq->hw_irq;
2630 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", 2634
2631 val, rc); 2635 rc = plpar_hcall_norets(H_EOI, val);
2636 if (rc)
2637 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2638 val, rc);
2639 }
2632 2640
2633 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2641 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2634 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2642 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
@@ -4586,14 +4594,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4586 release_crq_queue(adapter); 4594 release_crq_queue(adapter);
4587 } 4595 }
4588 4596
4589 rc = init_stats_buffers(adapter);
4590 if (rc)
4591 return rc;
4592
4593 rc = init_stats_token(adapter);
4594 if (rc)
4595 return rc;
4596
4597 return rc; 4597 return rc;
4598} 4598}
4599 4599
@@ -4662,13 +4662,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4662 goto ibmvnic_init_fail; 4662 goto ibmvnic_init_fail;
4663 } while (rc == EAGAIN); 4663 } while (rc == EAGAIN);
4664 4664
4665 rc = init_stats_buffers(adapter);
4666 if (rc)
4667 goto ibmvnic_init_fail;
4668
4669 rc = init_stats_token(adapter);
4670 if (rc)
4671 goto ibmvnic_stats_fail;
4672
4665 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4673 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4666 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 4674 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4667 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 4675 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4668 4676
4669 rc = device_create_file(&dev->dev, &dev_attr_failover); 4677 rc = device_create_file(&dev->dev, &dev_attr_failover);
4670 if (rc) 4678 if (rc)
4671 goto ibmvnic_init_fail; 4679 goto ibmvnic_dev_file_err;
4672 4680
4673 netif_carrier_off(netdev); 4681 netif_carrier_off(netdev);
4674 rc = register_netdev(netdev); 4682 rc = register_netdev(netdev);
@@ -4687,6 +4695,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4687ibmvnic_register_fail: 4695ibmvnic_register_fail:
4688 device_remove_file(&dev->dev, &dev_attr_failover); 4696 device_remove_file(&dev->dev, &dev_attr_failover);
4689 4697
4698ibmvnic_dev_file_err:
4699 release_stats_token(adapter);
4700
4701ibmvnic_stats_fail:
4702 release_stats_buffers(adapter);
4703
4690ibmvnic_init_fail: 4704ibmvnic_init_fail:
4691 release_sub_crqs(adapter, 1); 4705 release_sub_crqs(adapter, 1);
4692 release_crq_queue(adapter); 4706 release_crq_queue(adapter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index a822f7a56bc5..685337d58276 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -43,12 +43,12 @@
43#include "fw.h" 43#include "fw.h"
44 44
45/* 45/*
46 * We allocate in as big chunks as we can, up to a maximum of 256 KB 46 * We allocate in page size (default 4KB on many archs) chunks to avoid high
47 * per chunk. 47 * order memory allocations in fragmented/high usage memory situation.
48 */ 48 */
49enum { 49enum {
50 MLX4_ICM_ALLOC_SIZE = 1 << 18, 50 MLX4_ICM_ALLOC_SIZE = PAGE_SIZE,
51 MLX4_TABLE_CHUNK_SIZE = 1 << 18 51 MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE,
52}; 52};
53 53
54static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) 54static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
398 u64 size; 398 u64 size;
399 399
400 obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; 400 obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
401 if (WARN_ON(!obj_per_chunk))
402 return -EINVAL;
401 num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; 403 num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
402 404
403 table->icm = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); 405 table->icm = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
404 if (!table->icm) 406 if (!table->icm)
405 return -ENOMEM; 407 return -ENOMEM;
406 table->virt = virt; 408 table->virt = virt;
@@ -446,7 +448,7 @@ err:
446 mlx4_free_icm(dev, table->icm[i], use_coherent); 448 mlx4_free_icm(dev, table->icm[i], use_coherent);
447 } 449 }
448 450
449 kfree(table->icm); 451 kvfree(table->icm);
450 452
451 return -ENOMEM; 453 return -ENOMEM;
452} 454}
@@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
462 mlx4_free_icm(dev, table->icm[i], table->coherent); 464 mlx4_free_icm(dev, table->icm[i], table->coherent);
463 } 465 }
464 466
465 kfree(table->icm); 467 kvfree(table->icm);
466} 468}
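
With MLX4_TABLE_CHUNK_SIZE reduced to PAGE_SIZE, the icm pointer array can grow large, so the allocation above switches from kcalloc() to kvzalloc() with a matching kvfree(). A minimal sketch of that pairing; the element count and size are illustrative:

#include <linux/mm.h>
#include <linux/slab.h>

static void *example_alloc_table(size_t num_entries, size_t entry_size)
{
        /*
         * kvzalloc() tries kmalloc() first and transparently falls back
         * to vmalloc() when the request is too large or memory is too
         * fragmented for the page allocator - exactly the situation once
         * chunks shrink to PAGE_SIZE and num_icm grows.
         */
        return kvzalloc(num_entries * entry_size, GFP_KERNEL);
}

static void example_free_table(void *table)
{
        /* kvfree() handles both kmalloc()ed and vmalloc()ed memory. */
        kvfree(table);
}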
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 2edcce98ab2d..65482f004e50 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
172 list_add_tail(&dev_ctx->list, &priv->ctx_list); 172 list_add_tail(&dev_ctx->list, &priv->ctx_list);
173 spin_unlock_irqrestore(&priv->ctx_lock, flags); 173 spin_unlock_irqrestore(&priv->ctx_lock, flags);
174 174
175 mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n", 175 mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
176 dev_ctx->intf->protocol, enable ? 176 dev_ctx->intf->protocol, enable ?
177 "enabled" : "disabled"); 177 "enabled" : "disabled");
178 } 178 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 211578ffc70d..60172a38c4a4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2929,6 +2929,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2929 mlx4_err(dev, "Failed to create file for port %d\n", port); 2929 mlx4_err(dev, "Failed to create file for port %d\n", port);
2930 devlink_port_unregister(&info->devlink_port); 2930 devlink_port_unregister(&info->devlink_port);
2931 info->port = -1; 2931 info->port = -1;
2932 return err;
2932 } 2933 }
2933 2934
2934 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2935 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
@@ -2950,9 +2951,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2950 &info->port_attr); 2951 &info->port_attr);
2951 devlink_port_unregister(&info->devlink_port); 2952 devlink_port_unregister(&info->devlink_port);
2952 info->port = -1; 2953 info->port = -1;
2954 return err;
2953 } 2955 }
2954 2956
2955 return err; 2957 return 0;
2956} 2958}
2957 2959
2958static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2960static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 3aaf4bad6c5a..427e7a31862c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
393 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 393 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
394 struct mlx4_qp *qp; 394 struct mlx4_qp *qp;
395 395
396 spin_lock(&qp_table->lock); 396 spin_lock_irq(&qp_table->lock);
397 397
398 qp = __mlx4_qp_lookup(dev, qpn); 398 qp = __mlx4_qp_lookup(dev, qpn);
399 399
400 spin_unlock(&qp_table->lock); 400 spin_unlock_irq(&qp_table->lock);
401 return qp; 401 return qp;
402} 402}
403 403
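
mlx4_qp_lookup() above switches to the IRQ-disabling lock variant, which matters when the same lock can also be taken from interrupt context. A minimal sketch of why, assuming a lock shared with an IRQ handler:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(table_lock);

static void *example_lookup(void)
{
        void *obj;

        /*
         * Plain spin_lock() would deadlock if the interrupt handler that
         * also takes table_lock fired on this CPU while we hold it;
         * spin_lock_irq() closes that window.  (A caller that may already
         * run with IRQs off would use spin_lock_irqsave() instead.)
         */
        spin_lock_irq(&table_lock);
        obj = NULL;     /* real code would walk the table here */
        spin_unlock_irq(&table_lock);

        return obj;
}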
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 176645762e49..1ff0b0e93804 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -615,6 +615,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
615 return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); 615 return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
616} 616}
617 617
618static __be32 mlx5e_get_fcs(struct sk_buff *skb)
619{
620 int last_frag_sz, bytes_in_prev, nr_frags;
621 u8 *fcs_p1, *fcs_p2;
622 skb_frag_t *last_frag;
623 __be32 fcs_bytes;
624
625 if (!skb_is_nonlinear(skb))
626 return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
627
628 nr_frags = skb_shinfo(skb)->nr_frags;
629 last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
630 last_frag_sz = skb_frag_size(last_frag);
631
632 /* If all FCS data is in last frag */
633 if (last_frag_sz >= ETH_FCS_LEN)
634 return *(__be32 *)(skb_frag_address(last_frag) +
635 last_frag_sz - ETH_FCS_LEN);
636
637 fcs_p2 = (u8 *)skb_frag_address(last_frag);
638 bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
639
640 /* Find where the other part of the FCS is - Linear or another frag */
641 if (nr_frags == 1) {
642 fcs_p1 = skb_tail_pointer(skb);
643 } else {
644 skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
645
646 fcs_p1 = skb_frag_address(prev_frag) +
647 skb_frag_size(prev_frag);
648 }
649 fcs_p1 -= bytes_in_prev;
650
651 memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
652 memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
653
654 return fcs_bytes;
655}
656
618static inline void mlx5e_handle_csum(struct net_device *netdev, 657static inline void mlx5e_handle_csum(struct net_device *netdev,
619 struct mlx5_cqe64 *cqe, 658 struct mlx5_cqe64 *cqe,
620 struct mlx5e_rq *rq, 659 struct mlx5e_rq *rq,
@@ -643,6 +682,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
643 skb->csum = csum_partial(skb->data + ETH_HLEN, 682 skb->csum = csum_partial(skb->data + ETH_HLEN,
644 network_depth - ETH_HLEN, 683 network_depth - ETH_HLEN,
645 skb->csum); 684 skb->csum);
685 if (unlikely(netdev->features & NETIF_F_RXFCS))
686 skb->csum = csum_add(skb->csum,
687 (__force __wsum)mlx5e_get_fcs(skb));
646 rq->stats.csum_complete++; 688 rq->stats.csum_complete++;
647 return; 689 return;
648 } 690 }
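
With CHECKSUM_COMPLETE the reported sum must cover every byte handed to the stack, so when NETIF_F_RXFCS keeps the FCS on the frame, the new mlx5e_get_fcs() recovers those four bytes (possibly split across fragments) and the checksum is extended with csum_add(). A reduced sketch of the folding step, assuming the FCS is already linear at the tail:

#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

static void example_fold_fcs(struct sk_buff *skb)
{
        __be32 fcs = *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);

        /*
         * CHECKSUM_COMPLETE must account for every byte the stack will
         * see, FCS included when RXFCS leaves it on the frame.
         */
        skb->csum = csum_add(skb->csum, (__force __wsum)fcs);
}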
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 0f5da499a223..fad8c2e3804e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -237,19 +237,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
237 context->buf.sg[0].data = &context->command; 237 context->buf.sg[0].data = &context->command;
238 238
239 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); 239 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
240 list_add_tail(&context->list, &fdev->ipsec->pending_cmds); 240 res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
241 if (!res)
242 list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
241 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); 243 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
242 244
243 res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
244 if (res) { 245 if (res) {
245 mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n", 246 mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
246 res);
247 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
248 list_del(&context->list);
249 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
250 kfree(context); 247 kfree(context);
251 return ERR_PTR(res); 248 return ERR_PTR(res);
252 } 249 }
250
253 /* Context will be freed by wait func after completion */ 251 /* Context will be freed by wait func after completion */
254 return context; 252 return context;
255} 253}
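
The IPSec command path above now performs the send while holding pending_cmds_lock and queues the context only when the send succeeds, so a completion can never observe a command that failed to go out, and the failure path no longer has to unlink anything. A sketch of that ordering, with placeholder types and a stub transport; this only works because the send never sleeps under the spinlock:

#include <linux/list.h>
#include <linux/spinlock.h>

struct toy_cmd {
        struct list_head list;
};

static LIST_HEAD(pending_cmds);
static DEFINE_SPINLOCK(pending_lock);

/* Hypothetical non-sleeping transport; 0 means queued to hardware. */
static int toy_send(struct toy_cmd *cmd)
{
        return 0;
}

static int example_submit(struct toy_cmd *cmd)
{
        unsigned long flags;
        int res;

        spin_lock_irqsave(&pending_lock, flags);
        res = toy_send(cmd);
        if (!res)
                /* Completion handlers only ever see fully sent commands. */
                list_add_tail(&cmd->list, &pending_cmds);
        spin_unlock_irqrestore(&pending_lock, flags);

        return res;     /* on failure the caller frees cmd; nothing to unlink */
}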
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 1dc424685f4e..35fb31f682af 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -335,7 +335,7 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
335 return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem); 335 return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
336 336
337 start = mem; 337 start = mem;
338 while (mem - start + 8 < nfp_cpp_area_size(area)) { 338 while (mem - start + 8 <= nfp_cpp_area_size(area)) {
339 u8 __iomem *value; 339 u8 __iomem *value;
340 u32 type, length; 340 u32 type, length;
341 341
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 00f41c145d4d..820b226d6ff8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -77,7 +77,7 @@
77#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET 77#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
78 78
79/* ILT entry structure */ 79/* ILT entry structure */
80#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL 80#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
81#define ILT_ENTRY_PHY_ADDR_SHIFT 0 81#define ILT_ENTRY_PHY_ADDR_SHIFT 0
82#define ILT_ENTRY_VALID_MASK 0x1ULL 82#define ILT_ENTRY_VALID_MASK 0x1ULL
83#define ILT_ENTRY_VALID_SHIFT 52 83#define ILT_ENTRY_VALID_SHIFT 52
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 38502815d681..468c59d2e491 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
292 struct qed_ll2_tx_packet *p_pkt = NULL; 292 struct qed_ll2_tx_packet *p_pkt = NULL;
293 struct qed_ll2_info *p_ll2_conn; 293 struct qed_ll2_info *p_ll2_conn;
294 struct qed_ll2_tx_queue *p_tx; 294 struct qed_ll2_tx_queue *p_tx;
295 unsigned long flags = 0;
295 dma_addr_t tx_frag; 296 dma_addr_t tx_frag;
296 297
297 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); 298 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
300 301
301 p_tx = &p_ll2_conn->tx_queue; 302 p_tx = &p_ll2_conn->tx_queue;
302 303
304 spin_lock_irqsave(&p_tx->lock, flags);
303 while (!list_empty(&p_tx->active_descq)) { 305 while (!list_empty(&p_tx->active_descq)) {
304 p_pkt = list_first_entry(&p_tx->active_descq, 306 p_pkt = list_first_entry(&p_tx->active_descq,
305 struct qed_ll2_tx_packet, list_entry); 307 struct qed_ll2_tx_packet, list_entry);
@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
309 list_del(&p_pkt->list_entry); 311 list_del(&p_pkt->list_entry);
310 b_last_packet = list_empty(&p_tx->active_descq); 312 b_last_packet = list_empty(&p_tx->active_descq);
311 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); 313 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
314 spin_unlock_irqrestore(&p_tx->lock, flags);
312 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { 315 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
313 struct qed_ooo_buffer *p_buffer; 316 struct qed_ooo_buffer *p_buffer;
314 317
@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
328 b_last_frag, 331 b_last_frag,
329 b_last_packet); 332 b_last_packet);
330 } 333 }
334 spin_lock_irqsave(&p_tx->lock, flags);
331 } 335 }
336 spin_unlock_irqrestore(&p_tx->lock, flags);
332} 337}
333 338
334static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) 339static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
556 struct qed_ll2_info *p_ll2_conn = NULL; 561 struct qed_ll2_info *p_ll2_conn = NULL;
557 struct qed_ll2_rx_packet *p_pkt = NULL; 562 struct qed_ll2_rx_packet *p_pkt = NULL;
558 struct qed_ll2_rx_queue *p_rx; 563 struct qed_ll2_rx_queue *p_rx;
564 unsigned long flags = 0;
559 565
560 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); 566 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
561 if (!p_ll2_conn) 567 if (!p_ll2_conn)
@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
563 569
564 p_rx = &p_ll2_conn->rx_queue; 570 p_rx = &p_ll2_conn->rx_queue;
565 571
572 spin_lock_irqsave(&p_rx->lock, flags);
566 while (!list_empty(&p_rx->active_descq)) { 573 while (!list_empty(&p_rx->active_descq)) {
567 p_pkt = list_first_entry(&p_rx->active_descq, 574 p_pkt = list_first_entry(&p_rx->active_descq,
568 struct qed_ll2_rx_packet, list_entry); 575 struct qed_ll2_rx_packet, list_entry);
569 if (!p_pkt) 576 if (!p_pkt)
570 break; 577 break;
571
572 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); 578 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
579 spin_unlock_irqrestore(&p_rx->lock, flags);
573 580
574 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { 581 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
575 struct qed_ooo_buffer *p_buffer; 582 struct qed_ooo_buffer *p_buffer;
@@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
588 cookie, 595 cookie,
589 rx_buf_addr, b_last); 596 rx_buf_addr, b_last);
590 } 597 }
598 spin_lock_irqsave(&p_rx->lock, flags);
591 } 599 }
600 spin_unlock_irqrestore(&p_rx->lock, flags);
601}
602
603static bool
604qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
605 struct core_rx_slow_path_cqe *p_cqe)
606{
607 struct ooo_opaque *iscsi_ooo;
608 u32 cid;
609
610 if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
611 return false;
612
613 iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
614 if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
615 return false;
616
617 /* Need to make a flush */
618 cid = le32_to_cpu(iscsi_ooo->cid);
619 qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
620
621 return true;
592} 622}
593 623
594static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, 624static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
@@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
617 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); 647 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
618 cqe_type = cqe->rx_cqe_sp.type; 648 cqe_type = cqe->rx_cqe_sp.type;
619 649
650 if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
651 if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
652 &cqe->rx_cqe_sp))
653 continue;
654
620 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { 655 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
621 DP_NOTICE(p_hwfn, 656 DP_NOTICE(p_hwfn,
622 "Got a non-regular LB LL2 completion [type 0x%02x]\n", 657 "Got a non-regular LB LL2 completion [type 0x%02x]\n",
@@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
794 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; 829 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
795 int rc; 830 int rc;
796 831
832 if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
833 return 0;
834
797 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); 835 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
798 if (rc) 836 if (rc)
799 return rc; 837 return rc;
@@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
814 u16 new_idx = 0, num_bds = 0; 852 u16 new_idx = 0, num_bds = 0;
815 int rc; 853 int rc;
816 854
855 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
856 return 0;
857
817 new_idx = le16_to_cpu(*p_tx->p_fw_cons); 858 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
818 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); 859 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
819 860
@@ -1867,17 +1908,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
1867 1908
1868 /* Stop Tx & Rx of connection, if needed */ 1909 /* Stop Tx & Rx of connection, if needed */
1869 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { 1910 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1911 p_ll2_conn->tx_queue.b_cb_registred = false;
1912 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
1870 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); 1913 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1871 if (rc) 1914 if (rc)
1872 goto out; 1915 goto out;
1916
1873 qed_ll2_txq_flush(p_hwfn, connection_handle); 1917 qed_ll2_txq_flush(p_hwfn, connection_handle);
1918 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1874 } 1919 }
1875 1920
1876 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { 1921 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1922 p_ll2_conn->rx_queue.b_cb_registred = false;
1923 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
1877 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); 1924 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1878 if (rc) 1925 if (rc)
1879 goto out; 1926 goto out;
1927
1880 qed_ll2_rxq_flush(p_hwfn, connection_handle); 1928 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1929 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1881 } 1930 }
1882 1931
1883 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) 1932 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
@@ -1925,16 +1974,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
1925 if (!p_ll2_conn) 1974 if (!p_ll2_conn)
1926 return; 1975 return;
1927 1976
1928 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1929 p_ll2_conn->rx_queue.b_cb_registred = false;
1930 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1931 }
1932
1933 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1934 p_ll2_conn->tx_queue.b_cb_registred = false;
1935 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1936 }
1937
1938 kfree(p_ll2_conn->tx_queue.descq_mem); 1977 kfree(p_ll2_conn->tx_queue.descq_mem);
1939 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); 1978 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1940 1979
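
The flush loops above now take the queue lock around the list manipulation but drop it across the per-packet completion callbacks, re-acquiring the lock and re-checking list_empty() on each pass. A sketch of that loop shape, with placeholder types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct toy_pkt {
        struct list_head list;
};

static void toy_complete(struct toy_pkt *pkt)
{
        /* Callback may take other locks or re-enter the queue code. */
}

static void example_flush(struct list_head *active, struct list_head *free,
                          spinlock_t *lock)
{
        struct toy_pkt *pkt;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        while (!list_empty(active)) {
                pkt = list_first_entry(active, struct toy_pkt, list);
                list_move_tail(&pkt->list, free);

                /*
                 * Drop the lock across the callback, then revalidate by
                 * re-checking list_empty() at the top of the loop.
                 */
                spin_unlock_irqrestore(lock, flags);
                toy_complete(pkt);
                spin_lock_irqsave(lock, flags);
        }
        spin_unlock_irqrestore(lock, flags);
}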
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a01e7d6e5442..f6655e251bbd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1066,13 +1066,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1066 1066
1067 DP_INFO(edev, "Starting qede_remove\n"); 1067 DP_INFO(edev, "Starting qede_remove\n");
1068 1068
1069 qede_rdma_dev_remove(edev);
1069 unregister_netdev(ndev); 1070 unregister_netdev(ndev);
1070 cancel_delayed_work_sync(&edev->sp_task); 1071 cancel_delayed_work_sync(&edev->sp_task);
1071 1072
1072 qede_ptp_disable(edev); 1073 qede_ptp_disable(edev);
1073 1074
1074 qede_rdma_dev_remove(edev);
1075
1076 edev->ops->common->set_power_state(cdev, PCI_D0); 1075 edev->ops->common->set_power_state(cdev, PCI_D0);
1077 1076
1078 pci_set_drvdata(pdev, NULL); 1077 pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a5b792ce2ae7..1bf930d4a1e5 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -163,7 +163,7 @@ enum {
163}; 163};
164 164
165/* Driver's parameters */ 165/* Driver's parameters */
166#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 166#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS)
167#define SH_ETH_RX_ALIGN 32 167#define SH_ETH_RX_ALIGN 32
168#else 168#else
169#define SH_ETH_RX_ALIGN 2 169#define SH_ETH_RX_ALIGN 2
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 450eec264a5e..4377c26f714d 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -792,8 +792,10 @@ static int ipvlan_device_event(struct notifier_block *unused,
792 break; 792 break;
793 793
794 case NETDEV_CHANGEADDR: 794 case NETDEV_CHANGEADDR:
795 list_for_each_entry(ipvlan, &port->ipvlans, pnode) 795 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
796 ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr); 796 ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
797 call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
798 }
797 break; 799 break;
798 800
799 case NETDEV_PRE_TYPE_CHANGE: 801 case NETDEV_PRE_TYPE_CHANGE:
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 6838129839ca..e757b09f1889 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
61 return rc; 61 return rc;
62 62
63 /* make rcal=100, since rdb default is 000 */ 63 /* make rcal=100, since rdb default is 000 */
64 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10); 64 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
65 if (rc < 0) 65 if (rc < 0)
66 return rc; 66 return rc;
67 67
68 /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */ 68 /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
69 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10); 69 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
70 if (rc < 0) 70 if (rc < 0)
71 return rc; 71 return rc;
72 72
73 /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */ 73 /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
74 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00); 74 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
75 75
76 return 0; 76 return 0;
77} 77}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 5ad130c3da43..d5e0833d69b9 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
56 /* The register must be written to both the Shadow Register Select and 56 /* The register must be written to both the Shadow Register Select and
57 * the Shadow Read Register Selector 57 * the Shadow Read Register Selector
58 */ 58 */
59 phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | 59 phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
60 regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT); 60 regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
61 return phy_read(phydev, MII_BCM54XX_AUX_CTL); 61 return phy_read(phydev, MII_BCM54XX_AUX_CTL);
62} 62}
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 7c73808cbbde..81cceaa412fe 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -14,11 +14,18 @@
14#ifndef _LINUX_BCM_PHY_LIB_H 14#ifndef _LINUX_BCM_PHY_LIB_H
15#define _LINUX_BCM_PHY_LIB_H 15#define _LINUX_BCM_PHY_LIB_H
16 16
17#include <linux/brcmphy.h>
17#include <linux/phy.h> 18#include <linux/phy.h>
18 19
19int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); 20int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
20int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); 21int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
21 22
23static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
24 u16 reg, u16 val)
25{
26 return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
27}
28
22int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val); 29int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
23int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum); 30int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
24 31
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 29b1c88b55cc..01d2ff2f6241 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
65static void r_rc_cal_reset(struct phy_device *phydev) 65static void r_rc_cal_reset(struct phy_device *phydev)
66{ 66{
67 /* Reset R_CAL/RC_CAL Engine */ 67 /* Reset R_CAL/RC_CAL Engine */
68 bcm_phy_write_exp(phydev, 0x00b0, 0x0010); 68 bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
69 69
70 /* Disable Reset R_AL/RC_CAL Engine */ 70 /* Disable Reset R_AL/RC_CAL Engine */
71 bcm_phy_write_exp(phydev, 0x00b0, 0x0000); 71 bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
72} 72}
73 73
74static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) 74static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index f41b224a9cdb..ab195f0916d6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -573,9 +573,40 @@ static int ksz9031_config_init(struct phy_device *phydev)
573 ksz9031_of_load_skew_values(phydev, of_node, 573 ksz9031_of_load_skew_values(phydev, of_node,
574 MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4, 574 MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
575 tx_data_skews, 4); 575 tx_data_skews, 4);
576
577 /* Silicon Errata Sheet (DS80000691D or DS80000692D):
578 * When the device links in the 1000BASE-T slave mode only,
579 * the optional 125MHz reference output clock (CLK125_NDO)
580 * has wide duty cycle variation.
581 *
582 * The optional CLK125_NDO clock does not meet the RGMII
583 * 45/55 percent (min/max) duty cycle requirement and therefore
584 * cannot be used directly by the MAC side for clocking
585 * applications that have setup/hold time requirements on
586 * rising and falling clock edges.
587 *
588 * Workaround:
589 * Force the phy to be the master to receive a stable clock
590 * which meets the duty cycle requirement.
591 */
592 if (of_property_read_bool(of_node, "micrel,force-master")) {
593 result = phy_read(phydev, MII_CTRL1000);
594 if (result < 0)
595 goto err_force_master;
596
597 /* enable master mode, config & prefer master */
598 result |= CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER;
599 result = phy_write(phydev, MII_CTRL1000, result);
600 if (result < 0)
601 goto err_force_master;
602 }
576 } 603 }
577 604
578 return ksz9031_center_flp_timing(phydev); 605 return ksz9031_center_flp_timing(phydev);
606
607err_force_master:
608 phydev_err(phydev, "failed to force the phy to master mode\n");
609 return result;
579} 610}
580 611
581#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 612#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index dc7c7ec43202..02ad03a2fab7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -605,30 +605,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
605 605
606 if (cmd == PPPIOCDETACH) { 606 if (cmd == PPPIOCDETACH) {
607 /* 607 /*
608 * We have to be careful here... if the file descriptor 608 * PPPIOCDETACH is no longer supported as it was heavily broken,
609 * has been dup'd, we could have another process in the 609 * and is only known to have been used by pppd older than
610 * middle of a poll using the same file *, so we had 610 * ppp-2.4.2 (released November 2003).
611 * better not free the interface data structures -
612 * instead we fail the ioctl. Even in this case, we
613 * shut down the interface if we are the owner of it.
614 * Actually, we should get rid of PPPIOCDETACH, userland
615 * (i.e. pppd) could achieve the same effect by closing
616 * this fd and reopening /dev/ppp.
617 */ 611 */
612 pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
613 current->comm, current->pid);
618 err = -EINVAL; 614 err = -EINVAL;
619 if (pf->kind == INTERFACE) {
620 ppp = PF_TO_PPP(pf);
621 rtnl_lock();
622 if (file == ppp->owner)
623 unregister_netdevice(ppp->dev);
624 rtnl_unlock();
625 }
626 if (atomic_long_read(&file->f_count) < 2) {
627 ppp_release(NULL, file);
628 err = 0;
629 } else
630 pr_warn("PPPIOCDETACH file->f_count=%ld\n",
631 atomic_long_read(&file->f_count));
632 goto out; 615 goto out;
633 } 616 }
634 617
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ef33950a45d9..45d807796a18 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -681,15 +681,6 @@ static void tun_queue_purge(struct tun_file *tfile)
681 skb_queue_purge(&tfile->sk.sk_error_queue); 681 skb_queue_purge(&tfile->sk.sk_error_queue);
682} 682}
683 683
684static void tun_cleanup_tx_ring(struct tun_file *tfile)
685{
686 if (tfile->tx_ring.queue) {
687 ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
688 xdp_rxq_info_unreg(&tfile->xdp_rxq);
689 memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
690 }
691}
692
693static void __tun_detach(struct tun_file *tfile, bool clean) 684static void __tun_detach(struct tun_file *tfile, bool clean)
694{ 685{
695 struct tun_file *ntfile; 686 struct tun_file *ntfile;
@@ -736,7 +727,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
736 tun->dev->reg_state == NETREG_REGISTERED) 727 tun->dev->reg_state == NETREG_REGISTERED)
737 unregister_netdevice(tun->dev); 728 unregister_netdevice(tun->dev);
738 } 729 }
739 tun_cleanup_tx_ring(tfile); 730 if (tun)
731 xdp_rxq_info_unreg(&tfile->xdp_rxq);
732 ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
740 sock_put(&tfile->sk); 733 sock_put(&tfile->sk);
741 } 734 }
742} 735}
@@ -783,14 +776,14 @@ static void tun_detach_all(struct net_device *dev)
783 tun_napi_del(tun, tfile); 776 tun_napi_del(tun, tfile);
784 /* Drop read queue */ 777 /* Drop read queue */
785 tun_queue_purge(tfile); 778 tun_queue_purge(tfile);
779 xdp_rxq_info_unreg(&tfile->xdp_rxq);
786 sock_put(&tfile->sk); 780 sock_put(&tfile->sk);
787 tun_cleanup_tx_ring(tfile);
788 } 781 }
789 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 782 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
790 tun_enable_queue(tfile); 783 tun_enable_queue(tfile);
791 tun_queue_purge(tfile); 784 tun_queue_purge(tfile);
785 xdp_rxq_info_unreg(&tfile->xdp_rxq);
792 sock_put(&tfile->sk); 786 sock_put(&tfile->sk);
793 tun_cleanup_tx_ring(tfile);
794 } 787 }
795 BUG_ON(tun->numdisabled != 0); 788 BUG_ON(tun->numdisabled != 0);
796 789
@@ -834,7 +827,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
834 } 827 }
835 828
836 if (!tfile->detached && 829 if (!tfile->detached &&
837 ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) { 830 ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
831 GFP_KERNEL, tun_ptr_free)) {
838 err = -ENOMEM; 832 err = -ENOMEM;
839 goto out; 833 goto out;
840 } 834 }
@@ -1429,6 +1423,13 @@ static void tun_net_init(struct net_device *dev)
1429 dev->max_mtu = MAX_MTU - dev->hard_header_len; 1423 dev->max_mtu = MAX_MTU - dev->hard_header_len;
1430} 1424}
1431 1425
1426static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1427{
1428 struct sock *sk = tfile->socket.sk;
1429
1430 return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1431}
1432
1432/* Character device part */ 1433/* Character device part */
1433 1434
1434/* Poll */ 1435/* Poll */
@@ -1451,10 +1452,14 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1451 if (!ptr_ring_empty(&tfile->tx_ring)) 1452 if (!ptr_ring_empty(&tfile->tx_ring))
1452 mask |= EPOLLIN | EPOLLRDNORM; 1453 mask |= EPOLLIN | EPOLLRDNORM;
1453 1454
1454 if (tun->dev->flags & IFF_UP && 1455 /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
1455 (sock_writeable(sk) || 1456 * guarantee EPOLLOUT to be raised by either here or
1456 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1457 * tun_sock_write_space(). Then process could get notification
1457 sock_writeable(sk)))) 1458 * after it writes to a down device and meets -EIO.
1459 */
1460 if (tun_sock_writeable(tun, tfile) ||
1461 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1462 tun_sock_writeable(tun, tfile)))
1458 mask |= EPOLLOUT | EPOLLWRNORM; 1463 mask |= EPOLLOUT | EPOLLWRNORM;
1459 1464
1460 if (tun->dev->reg_state != NETREG_REGISTERED) 1465 if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -3219,6 +3224,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
3219 &tun_proto, 0); 3224 &tun_proto, 0);
3220 if (!tfile) 3225 if (!tfile)
3221 return -ENOMEM; 3226 return -ENOMEM;
3227 if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3228 sk_free(&tfile->sk);
3229 return -ENOMEM;
3230 }
3231
3222 RCU_INIT_POINTER(tfile->tun, NULL); 3232 RCU_INIT_POINTER(tfile->tun, NULL);
3223 tfile->flags = 0; 3233 tfile->flags = 0;
3224 tfile->ifindex = 0; 3234 tfile->ifindex = 0;
@@ -3239,8 +3249,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
3239 3249
3240 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 3250 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3241 3251
3242 memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
3243
3244 return 0; 3252 return 0;
3245} 3253}
3246 3254
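
The tun changes above give every tfile a valid (initially zero-capacity) ptr_ring at open time, grow it with ptr_ring_resize() on attach, and let teardown call ptr_ring_cleanup() unconditionally in place of the removed tun_cleanup_tx_ring(). A sketch of that lifecycle; free_entry() stands in for tun_ptr_free():

#include <linux/ptr_ring.h>
#include <linux/slab.h>

static void free_entry(void *ptr)
{
        kfree(ptr);     /* destructor for entries still queued */
}

static int example_lifecycle(struct ptr_ring *ring, int qlen)
{
        int err;

        /* open(): always leaves a valid (if empty) ring behind */
        err = ptr_ring_init(ring, 0, GFP_KERNEL);
        if (err)
                return err;

        /* attach(): grow to the device queue length, preserving entries */
        err = ptr_ring_resize(ring, qlen, GFP_KERNEL, free_entry);
        if (err)
                goto out;

        /* ... produce/consume ... */
out:
        /* close(): safe even if only the zero-sized init ever ran */
        ptr_ring_cleanup(ring, free_entry);
        return err;
}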
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 770422e953f7..032e1ac10a30 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
707 void *data; 707 void *data;
708 u32 act; 708 u32 act;
709 709
710 /* Transient failure which in theory could occur if
711 * in-flight packets from before XDP was enabled reach
712 * the receive path after XDP is loaded.
713 */
714 if (unlikely(hdr->hdr.gso_type))
715 goto err_xdp;
716
710 /* This happens when rx buffer size is underestimated 717 /* This happens when rx buffer size is underestimated
711 * or headroom is not enough because of the buffer 718 * or headroom is not enough because of the buffer
712 * was refilled before XDP is set. This should only 719 * was refilled before XDP is set. This should only
@@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
727 xdp_page = page; 734 xdp_page = page;
728 } 735 }
729 736
730 /* Transient failure which in theory could occur if
731 * in-flight packets from before XDP was enabled reach
732 * the receive path after XDP is loaded. In practice I
733 * was not able to create this condition.
734 */
735 if (unlikely(hdr->hdr.gso_type))
736 goto err_xdp;
737
738 /* Allow consuming headroom but reserve enough space to push 737 /* Allow consuming headroom but reserve enough space to push
739 * the descriptor on if we get an XDP_TX return code. 738 * the descriptor on if we get an XDP_TX return code.
740 */ 739 */
@@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
775 } 774 }
776 *xdp_xmit = true; 775 *xdp_xmit = true;
777 if (unlikely(xdp_page != page)) 776 if (unlikely(xdp_page != page))
778 goto err_xdp; 777 put_page(page);
779 rcu_read_unlock(); 778 rcu_read_unlock();
780 goto xdp_xmit; 779 goto xdp_xmit;
781 case XDP_REDIRECT: 780 case XDP_REDIRECT:
@@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
787 } 786 }
788 *xdp_xmit = true; 787 *xdp_xmit = true;
789 if (unlikely(xdp_page != page)) 788 if (unlikely(xdp_page != page))
790 goto err_xdp; 789 put_page(page);
791 rcu_read_unlock(); 790 rcu_read_unlock();
792 goto xdp_xmit; 791 goto xdp_xmit;
793 default: 792 default:
@@ -875,7 +874,7 @@ err_xdp:
875 rcu_read_unlock(); 874 rcu_read_unlock();
876err_skb: 875err_skb:
877 put_page(page); 876 put_page(page);
878 while (--num_buf) { 877 while (num_buf-- > 1) {
879 buf = virtqueue_get_buf(rq->vq, &len); 878 buf = virtqueue_get_buf(rq->vq, &len);
880 if (unlikely(!buf)) { 879 if (unlikely(!buf)) {
881 pr_debug("%s: rx error: %d buffers missing\n", 880 pr_debug("%s: rx error: %d buffers missing\n",
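
The last virtio_net hunk changes the drain-loop bound from --num_buf to num_buf-- > 1. Both forms consume num_buf - 1 remaining buffers when num_buf >= 1, but only the latter is safe when the counter is already zero, where the pre-decrement form would wrap an unsigned counter and spin through garbage. A reduced illustration:

/*
 * For an unsigned counter n, drain_fixed(n) returns n - 1 when n >= 1
 * and 0 when n == 0; the old "while (--n)" form would wrap at n == 0.
 */
static unsigned int drain_fixed(unsigned int num_buf)
{
        unsigned int drained = 0;

        while (num_buf-- > 1)   /* safe: 0 > 1 is false, no wrap */
                drained++;

        return drained;
}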
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9ebe2a689966..27a9bb8c9611 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
369 369
370 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 370 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
371 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { 371 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
372 /* Prevent any &gdesc->tcd field from being (speculatively)
373 * read before (&gdesc->tcd)->gen is read.
374 */
375 dma_rmb();
376
372 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( 377 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
373 &gdesc->tcd), tq, adapter->pdev, 378 &gdesc->tcd), tq, adapter->pdev,
374 adapter); 379 adapter);
@@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1103 gdesc->txd.tci = skb_vlan_tag_get(skb); 1108 gdesc->txd.tci = skb_vlan_tag_get(skb);
1104 } 1109 }
1105 1110
1111 /* Ensure that the write to (&gdesc->txd)->gen will be observed after
1112 * all other writes to &gdesc->txd.
1113 */
1114 dma_wmb();
1115
1106 /* finally flips the GEN bit of the SOP desc. */ 1116 /* finally flips the GEN bit of the SOP desc. */
1107 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ 1117 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1108 VMXNET3_TXD_GEN); 1118 VMXNET3_TXD_GEN);
@@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1298 */ 1308 */
1299 break; 1309 break;
1300 } 1310 }
1311
1312 /* Prevent any rcd field from being (speculatively) read before
1313 * rcd->gen is read.
1314 */
1315 dma_rmb();
1316
1301 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && 1317 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1302 rcd->rqID != rq->dataRingQid); 1318 rcd->rqID != rq->dataRingQid);
1303 idx = rcd->rxdIdx; 1319 idx = rcd->rxdIdx;
@@ -1528,6 +1544,12 @@ rcd_done:
1528 ring->next2comp = idx; 1544 ring->next2comp = idx;
1529 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); 1545 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1530 ring = rq->rx_ring + ring_idx; 1546 ring = rq->rx_ring + ring_idx;
1547
1548 /* Ensure that the writes to rxd->gen bits will be observed
1549 * after all other writes to rxd objects.
1550 */
1551 dma_wmb();
1552
1531 while (num_to_alloc) { 1553 while (num_to_alloc) {
1532 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, 1554 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1533 &rxCmdDesc); 1555 &rxCmdDesc);
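
Annotation: the dma_rmb()/dma_wmb() hunks in this file all enforce the same descriptor-ring discipline. The consumer must not read any descriptor field until after it has observed the generation bit flip, and the producer must make every field visible before flipping the bit that hands the slot to the other side. A schematic of the idiom (placeholder structures and helpers, not the vmxnet3 ones):

    /* Consumer: poll the generation bit, then read the payload. */
    while (READ_ONCE(desc->gen) == ring->gen) {
            dma_rmb();              /* order the gen check before field reads */
            process(desc->data);    /* safe: the device finished this slot    */
            desc = next_desc(ring);
    }

    /* Producer: fill the descriptor, then hand it to the device. */
    desc->addr = cpu_to_le64(dma_addr);
    desc->len  = cpu_to_le32(len);
    dma_wmb();                      /* publish all fields before the gen flip */
    desc->gen  = ring->gen;         /* device may consume from here on        */

dma_rmb()/dma_wmb() are the lightest barriers that order accesses to coherent DMA memory; the heavier rmb()/wmb() would also be correct but cost more on some architectures.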
@@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2688/* ==================== initialization and cleanup routines ============ */ 2710/* ==================== initialization and cleanup routines ============ */
2689 2711
2690static int 2712static int
2691vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) 2713vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
2692{ 2714{
2693 int err; 2715 int err;
2694 unsigned long mmio_start, mmio_len; 2716 unsigned long mmio_start, mmio_len;
@@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2700 return err; 2722 return err;
2701 } 2723 }
2702 2724
2703 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2704 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2705 dev_err(&pdev->dev,
2706 "pci_set_consistent_dma_mask failed\n");
2707 err = -EIO;
2708 goto err_set_mask;
2709 }
2710 *dma64 = true;
2711 } else {
2712 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2713 dev_err(&pdev->dev,
2714 "pci_set_dma_mask failed\n");
2715 err = -EIO;
2716 goto err_set_mask;
2717 }
2718 *dma64 = false;
2719 }
2720
2721 err = pci_request_selected_regions(pdev, (1 << 2) - 1, 2725 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2722 vmxnet3_driver_name); 2726 vmxnet3_driver_name);
2723 if (err) { 2727 if (err) {
2724 dev_err(&pdev->dev, 2728 dev_err(&pdev->dev,
2725 "Failed to request region for adapter: error %d\n", err); 2729 "Failed to request region for adapter: error %d\n", err);
2726 goto err_set_mask; 2730 goto err_enable_device;
2727 } 2731 }
2728 2732
2729 pci_set_master(pdev); 2733 pci_set_master(pdev);
@@ -2751,7 +2755,7 @@ err_bar1:
2751 iounmap(adapter->hw_addr0); 2755 iounmap(adapter->hw_addr0);
2752err_ioremap: 2756err_ioremap:
2753 pci_release_selected_regions(pdev, (1 << 2) - 1); 2757 pci_release_selected_regions(pdev, (1 << 2) - 1);
2754err_set_mask: 2758err_enable_device:
2755 pci_disable_device(pdev); 2759 pci_disable_device(pdev);
2756 return err; 2760 return err;
2757} 2761}
@@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3254#endif 3258#endif
3255 }; 3259 };
3256 int err; 3260 int err;
3257 bool dma64 = false; /* stupid gcc */ 3261 bool dma64;
3258 u32 ver; 3262 u32 ver;
3259 struct net_device *netdev; 3263 struct net_device *netdev;
3260 struct vmxnet3_adapter *adapter; 3264 struct vmxnet3_adapter *adapter;
@@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3300 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; 3304 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3301 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; 3305 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3302 3306
3307 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
3308 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
3309 dev_err(&pdev->dev,
3310 "pci_set_consistent_dma_mask failed\n");
3311 err = -EIO;
3312 goto err_set_mask;
3313 }
3314 dma64 = true;
3315 } else {
3316 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
3317 dev_err(&pdev->dev,
3318 "pci_set_dma_mask failed\n");
3319 err = -EIO;
3320 goto err_set_mask;
3321 }
3322 dma64 = false;
3323 }
3324
3303 spin_lock_init(&adapter->cmd_lock); 3325 spin_lock_init(&adapter->cmd_lock);
3304 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, 3326 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3305 sizeof(struct vmxnet3_adapter), 3327 sizeof(struct vmxnet3_adapter),
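
Annotation: moving the DMA-mask setup from vmxnet3_alloc_pci_resources() into the probe path matters because the dma_map_single() call just below runs before the PCI resources are allocated; previously it executed with no DMA mask configured yet. With the generic DMA API the same block could be written more compactly — a sketch only, not what the driver does:

    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
            dma64 = true;
    } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) == 0) {
            dma64 = false;
    } else {
            err = -EIO;
            goto err_set_mask;
    }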
@@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3307 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { 3329 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3308 dev_err(&pdev->dev, "Failed to map dma\n"); 3330 dev_err(&pdev->dev, "Failed to map dma\n");
3309 err = -EFAULT; 3331 err = -EFAULT;
3310 goto err_dma_map; 3332 goto err_set_mask;
3311 } 3333 }
3312 adapter->shared = dma_alloc_coherent( 3334 adapter->shared = dma_alloc_coherent(
3313 &adapter->pdev->dev, 3335 &adapter->pdev->dev,
@@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3358 } 3380 }
3359#endif /* VMXNET3_RSS */ 3381#endif /* VMXNET3_RSS */
3360 3382
3361 err = vmxnet3_alloc_pci_resources(adapter, &dma64); 3383 err = vmxnet3_alloc_pci_resources(adapter);
3362 if (err < 0) 3384 if (err < 0)
3363 goto err_alloc_pci; 3385 goto err_alloc_pci;
3364 3386
@@ -3504,7 +3526,7 @@ err_alloc_queue_desc:
3504err_alloc_shared: 3526err_alloc_shared:
3505 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, 3527 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3506 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); 3528 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3507err_dma_map: 3529err_set_mask:
3508 free_netdev(netdev); 3530 free_netdev(netdev);
3509 return err; 3531 return err;
3510} 3532}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a3326463b71f..a2c554f8a61b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,12 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* Each byte of this 32-bit integer encodes a version number in
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 75 * VMXNET3_DRIVER_VERSION_STRING.
76 */
77#define VMXNET3_DRIVER_VERSION_NUM 0x01041000
76 78
77#if defined(CONFIG_PCI_MSI) 79#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 80 /* RSS only makes sense if MSI-X is supported. */
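
Annotation: the version constant packs one component per byte, so "1.4.16.0" encodes as 0x01041000 (16 decimal is 0x10). A hypothetical helper macro, not present in the header, that makes the encoding explicit:

    #define VMXNET3_MAKE_VERSION(a, b, c, d) \
            (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

    /* VMXNET3_MAKE_VERSION(1, 4, 16, 0) == 0x01041000 */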
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4a017a0d71ea..920c23e542a5 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3340,7 +3340,7 @@ out_err:
3340static int hwsim_dump_radio_nl(struct sk_buff *skb, 3340static int hwsim_dump_radio_nl(struct sk_buff *skb,
3341 struct netlink_callback *cb) 3341 struct netlink_callback *cb)
3342{ 3342{
3343 int last_idx = cb->args[0]; 3343 int last_idx = cb->args[0] - 1;
3344 struct mac80211_hwsim_data *data = NULL; 3344 struct mac80211_hwsim_data *data = NULL;
3345 int res = 0; 3345 int res = 0;
3346 void *hdr; 3346 void *hdr;
@@ -3368,7 +3368,7 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
3368 last_idx = data->idx; 3368 last_idx = data->idx;
3369 } 3369 }
3370 3370
3371 cb->args[0] = last_idx; 3371 cb->args[0] = last_idx + 1;
3372 3372
3373 /* list changed, but no new element sent, set interrupted flag */ 3373 /* list changed, but no new element sent, set interrupted flag */
3374 if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) { 3374 if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
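
Annotation: cb->args[] is zeroed by netlink when a dump starts, so storing the raw radio index as the resume cookie makes index 0 ambiguous with "fresh dump". Biasing the stored value by one reserves 0 for the start-of-dump case. The resulting dump-callback idiom, sketched with hypothetical names:

    static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            int last_idx = cb->args[0] - 1;   /* args[0] == 0 means fresh dump */

            /* ... emit every entry with idx > last_idx, advancing last_idx ... */

            cb->args[0] = last_idx + 1;       /* never store a raw index of 0  */
            return skb->len;
    }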
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 88a8b5916624..dbb7464c018c 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -27,7 +27,7 @@ config NVME_FABRICS
27 27
28config NVME_RDMA 28config NVME_RDMA
29 tristate "NVM Express over Fabrics RDMA host driver" 29 tristate "NVM Express over Fabrics RDMA host driver"
30 depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK 30 depends on INFINIBAND_ADDR_TRANS && BLOCK
31 select NVME_CORE 31 select NVME_CORE
32 select NVME_FABRICS 32 select NVME_FABRICS
33 select SG_POOL 33 select SG_POOL
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 99b857e5a7a9..b9ca782fe82d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1447,8 +1447,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1447 if (ns->lba_shift == 0) 1447 if (ns->lba_shift == 0)
1448 ns->lba_shift = 9; 1448 ns->lba_shift = 9;
1449 ns->noiob = le16_to_cpu(id->noiob); 1449 ns->noiob = le16_to_cpu(id->noiob);
1450 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1451 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); 1450 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1451 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1452 /* the PI implementation requires metadata equal t10 pi tuple size */ 1452 /* the PI implementation requires metadata equal t10 pi tuple size */
1453 if (ns->ms == sizeof(struct t10_pi_tuple)) 1453 if (ns->ms == sizeof(struct t10_pi_tuple))
1454 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1454 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
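
Annotation: the two swapped lines are order-dependent. ns->ext is derived from ns->ms, so computing it first read whatever ns->ms held from the previous revalidation and could misdetect extended-metadata formats. The dependency in isolation:

    ns->ms  = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
    ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); /* reads ns->ms */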
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 3c7b61ddb0d1..7595664ee753 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
27 27
28config NVME_TARGET_RDMA 28config NVME_TARGET_RDMA
29 tristate "NVMe over Fabrics RDMA target support" 29 tristate "NVMe over Fabrics RDMA target support"
30 depends on INFINIBAND && INFINIBAND_ADDR_TRANS 30 depends on INFINIBAND_ADDR_TRANS
31 depends on NVME_TARGET 31 depends on NVME_TARGET
32 select SGL_ALLOC 32 select SGL_ALLOC
33 help 33 help
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 92fa94a6dcc1..ab2f3fead6b1 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -33,8 +33,6 @@ LIST_HEAD(opp_tables);
33/* Lock to allow exclusive modification to the device and opp lists */ 33/* Lock to allow exclusive modification to the device and opp lists */
34DEFINE_MUTEX(opp_table_lock); 34DEFINE_MUTEX(opp_table_lock);
35 35
36static void dev_pm_opp_get(struct dev_pm_opp *opp);
37
38static struct opp_device *_find_opp_dev(const struct device *dev, 36static struct opp_device *_find_opp_dev(const struct device *dev,
39 struct opp_table *opp_table) 37 struct opp_table *opp_table)
40{ 38{
@@ -281,6 +279,23 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
281} 279}
282EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq); 280EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
283 281
282int _get_opp_count(struct opp_table *opp_table)
283{
284 struct dev_pm_opp *opp;
285 int count = 0;
286
287 mutex_lock(&opp_table->lock);
288
289 list_for_each_entry(opp, &opp_table->opp_list, node) {
290 if (opp->available)
291 count++;
292 }
293
294 mutex_unlock(&opp_table->lock);
295
296 return count;
297}
298
284/** 299/**
285 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table 300 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
286 * @dev: device for which we do this operation 301 * @dev: device for which we do this operation
@@ -291,25 +306,17 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq);
291int dev_pm_opp_get_opp_count(struct device *dev) 306int dev_pm_opp_get_opp_count(struct device *dev)
292{ 307{
293 struct opp_table *opp_table; 308 struct opp_table *opp_table;
294 struct dev_pm_opp *temp_opp; 309 int count;
295 int count = 0;
296 310
297 opp_table = _find_opp_table(dev); 311 opp_table = _find_opp_table(dev);
298 if (IS_ERR(opp_table)) { 312 if (IS_ERR(opp_table)) {
299 count = PTR_ERR(opp_table); 313 count = PTR_ERR(opp_table);
300 dev_dbg(dev, "%s: OPP table not found (%d)\n", 314 dev_dbg(dev, "%s: OPP table not found (%d)\n",
301 __func__, count); 315 __func__, count);
302 return count; 316 return 0;
303 }
304
305 mutex_lock(&opp_table->lock);
306
307 list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
308 if (temp_opp->available)
309 count++;
310 } 317 }
311 318
312 mutex_unlock(&opp_table->lock); 319 count = _get_opp_count(opp_table);
313 dev_pm_opp_put_opp_table(opp_table); 320 dev_pm_opp_put_opp_table(opp_table);
314 321
315 return count; 322 return count;
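
Annotation: note the behavioural change folded into this refactor: when no OPP table exists, dev_pm_opp_get_opp_count() now returns 0 instead of propagating PTR_ERR(opp_table), so callers see "no table yet" and "table with no usable OPPs" the same way. A hypothetical caller under the new contract:

    nr = dev_pm_opp_get_opp_count(dev);
    if (nr <= 0)                    /* missing table and empty table alike */
            return -EPROBE_DEFER;   /* illustrative policy, not mandated   */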
@@ -892,7 +899,7 @@ static void _opp_kref_release(struct kref *kref)
892 dev_pm_opp_put_opp_table(opp_table); 899 dev_pm_opp_put_opp_table(opp_table);
893} 900}
894 901
895static void dev_pm_opp_get(struct dev_pm_opp *opp) 902void dev_pm_opp_get(struct dev_pm_opp *opp)
896{ 903{
897 kref_get(&opp->kref); 904 kref_get(&opp->kref);
898} 905}
@@ -985,22 +992,11 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
985 return true; 992 return true;
986} 993}
987 994
988/* 995static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
989 * Returns: 996 struct opp_table *opp_table,
990 * 0: On success. And appropriate error message for duplicate OPPs. 997 struct list_head **head)
991 * -EBUSY: For OPP with same freq/volt and is available. The callers of
992 * _opp_add() must return 0 if they receive -EBUSY from it. This is to make
993 * sure we don't print error messages unnecessarily if different parts of
994 * kernel try to initialize the OPP table.
995 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
996 * should be considered an error by the callers of _opp_add().
997 */
998int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
999 struct opp_table *opp_table)
1000{ 998{
1001 struct dev_pm_opp *opp; 999 struct dev_pm_opp *opp;
1002 struct list_head *head;
1003 int ret;
1004 1000
1005 /* 1001 /*
1006 * Insert new OPP in order of increasing frequency and discard if 1002 * Insert new OPP in order of increasing frequency and discard if
@@ -1010,17 +1006,14 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
1010 * loop, don't replace it with head otherwise it will become an infinite 1006 * loop, don't replace it with head otherwise it will become an infinite
1011 * loop. 1007 * loop.
1012 */ 1008 */
1013 mutex_lock(&opp_table->lock);
1014 head = &opp_table->opp_list;
1015
1016 list_for_each_entry(opp, &opp_table->opp_list, node) { 1009 list_for_each_entry(opp, &opp_table->opp_list, node) {
1017 if (new_opp->rate > opp->rate) { 1010 if (new_opp->rate > opp->rate) {
1018 head = &opp->node; 1011 *head = &opp->node;
1019 continue; 1012 continue;
1020 } 1013 }
1021 1014
1022 if (new_opp->rate < opp->rate) 1015 if (new_opp->rate < opp->rate)
1023 break; 1016 return 0;
1024 1017
1025 /* Duplicate OPPs */ 1018 /* Duplicate OPPs */
1026 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", 1019 dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
@@ -1029,15 +1022,39 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
1029 new_opp->supplies[0].u_volt, new_opp->available); 1022 new_opp->supplies[0].u_volt, new_opp->available);
1030 1023
1031 /* Should we compare voltages for all regulators here ? */ 1024 /* Should we compare voltages for all regulators here ? */
1032 ret = opp->available && 1025 return opp->available &&
1033 new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST; 1026 new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST;
1034
1035 mutex_unlock(&opp_table->lock);
1036 return ret;
1037 } 1027 }
1038 1028
1039 if (opp_table->get_pstate) 1029 return 0;
1040 new_opp->pstate = opp_table->get_pstate(dev, new_opp->rate); 1030}
1031
1032/*
1033 * Returns:
1034 * 0: On success. And appropriate error message for duplicate OPPs.
1035 * -EBUSY: For OPP with same freq/volt and is available. The callers of
1036 * _opp_add() must return 0 if they receive -EBUSY from it. This is to make
1037 * sure we don't print error messages unnecessarily if different parts of
1038 * kernel try to initialize the OPP table.
1039 * -EEXIST: For OPP with same freq but different volt or is unavailable. This
1040 * should be considered an error by the callers of _opp_add().
1041 */
1042int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
1043 struct opp_table *opp_table, bool rate_not_available)
1044{
1045 struct list_head *head;
1046 int ret;
1047
1048 mutex_lock(&opp_table->lock);
1049 head = &opp_table->opp_list;
1050
1051 if (likely(!rate_not_available)) {
1052 ret = _opp_is_duplicate(dev, new_opp, opp_table, &head);
1053 if (ret) {
1054 mutex_unlock(&opp_table->lock);
1055 return ret;
1056 }
1057 }
1041 1058
1042 list_add(&new_opp->node, head); 1059 list_add(&new_opp->node, head);
1043 mutex_unlock(&opp_table->lock); 1060 mutex_unlock(&opp_table->lock);
@@ -1104,7 +1121,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
1104 new_opp->available = true; 1121 new_opp->available = true;
1105 new_opp->dynamic = dynamic; 1122 new_opp->dynamic = dynamic;
1106 1123
1107 ret = _opp_add(dev, new_opp, opp_table); 1124 ret = _opp_add(dev, new_opp, opp_table, false);
1108 if (ret) { 1125 if (ret) {
1109 /* Don't return error for duplicate OPPs */ 1126 /* Don't return error for duplicate OPPs */
1110 if (ret == -EBUSY) 1127 if (ret == -EBUSY)
@@ -1140,7 +1157,6 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
1140 const u32 *versions, unsigned int count) 1157 const u32 *versions, unsigned int count)
1141{ 1158{
1142 struct opp_table *opp_table; 1159 struct opp_table *opp_table;
1143 int ret;
1144 1160
1145 opp_table = dev_pm_opp_get_opp_table(dev); 1161 opp_table = dev_pm_opp_get_opp_table(dev);
1146 if (!opp_table) 1162 if (!opp_table)
@@ -1149,29 +1165,20 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
1149 /* Make sure there are no concurrent readers while updating opp_table */ 1165 /* Make sure there are no concurrent readers while updating opp_table */
1150 WARN_ON(!list_empty(&opp_table->opp_list)); 1166 WARN_ON(!list_empty(&opp_table->opp_list));
1151 1167
1152 /* Do we already have a version hierarchy associated with opp_table? */ 1168 /* Another CPU that shares the OPP table has set the property ? */
1153 if (opp_table->supported_hw) { 1169 if (opp_table->supported_hw)
1154 dev_err(dev, "%s: Already have supported hardware list\n", 1170 return opp_table;
1155 __func__);
1156 ret = -EBUSY;
1157 goto err;
1158 }
1159 1171
1160 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions), 1172 opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1161 GFP_KERNEL); 1173 GFP_KERNEL);
1162 if (!opp_table->supported_hw) { 1174 if (!opp_table->supported_hw) {
1163 ret = -ENOMEM; 1175 dev_pm_opp_put_opp_table(opp_table);
1164 goto err; 1176 return ERR_PTR(-ENOMEM);
1165 } 1177 }
1166 1178
1167 opp_table->supported_hw_count = count; 1179 opp_table->supported_hw_count = count;
1168 1180
1169 return opp_table; 1181 return opp_table;
1170
1171err:
1172 dev_pm_opp_put_opp_table(opp_table);
1173
1174 return ERR_PTR(ret);
1175} 1182}
1176EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw); 1183EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
1177 1184
@@ -1188,12 +1195,6 @@ void dev_pm_opp_put_supported_hw(struct opp_table *opp_table)
1188 /* Make sure there are no concurrent readers while updating opp_table */ 1195 /* Make sure there are no concurrent readers while updating opp_table */
1189 WARN_ON(!list_empty(&opp_table->opp_list)); 1196 WARN_ON(!list_empty(&opp_table->opp_list));
1190 1197
1191 if (!opp_table->supported_hw) {
1192 pr_err("%s: Doesn't have supported hardware list\n",
1193 __func__);
1194 return;
1195 }
1196
1197 kfree(opp_table->supported_hw); 1198 kfree(opp_table->supported_hw);
1198 opp_table->supported_hw = NULL; 1199 opp_table->supported_hw = NULL;
1199 opp_table->supported_hw_count = 0; 1200 opp_table->supported_hw_count = 0;
@@ -1215,7 +1216,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1215struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) 1216struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1216{ 1217{
1217 struct opp_table *opp_table; 1218 struct opp_table *opp_table;
1218 int ret;
1219 1219
1220 opp_table = dev_pm_opp_get_opp_table(dev); 1220 opp_table = dev_pm_opp_get_opp_table(dev);
1221 if (!opp_table) 1221 if (!opp_table)
@@ -1224,26 +1224,17 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1224 /* Make sure there are no concurrent readers while updating opp_table */ 1224 /* Make sure there are no concurrent readers while updating opp_table */
1225 WARN_ON(!list_empty(&opp_table->opp_list)); 1225 WARN_ON(!list_empty(&opp_table->opp_list));
1226 1226
1227 /* Do we already have a prop-name associated with opp_table? */ 1227 /* Another CPU that shares the OPP table has set the property ? */
1228 if (opp_table->prop_name) { 1228 if (opp_table->prop_name)
1229 dev_err(dev, "%s: Already have prop-name %s\n", __func__, 1229 return opp_table;
1230 opp_table->prop_name);
1231 ret = -EBUSY;
1232 goto err;
1233 }
1234 1230
1235 opp_table->prop_name = kstrdup(name, GFP_KERNEL); 1231 opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1236 if (!opp_table->prop_name) { 1232 if (!opp_table->prop_name) {
1237 ret = -ENOMEM; 1233 dev_pm_opp_put_opp_table(opp_table);
1238 goto err; 1234 return ERR_PTR(-ENOMEM);
1239 } 1235 }
1240 1236
1241 return opp_table; 1237 return opp_table;
1242
1243err:
1244 dev_pm_opp_put_opp_table(opp_table);
1245
1246 return ERR_PTR(ret);
1247} 1238}
1248EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); 1239EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1249 1240
@@ -1260,11 +1251,6 @@ void dev_pm_opp_put_prop_name(struct opp_table *opp_table)
1260 /* Make sure there are no concurrent readers while updating opp_table */ 1251 /* Make sure there are no concurrent readers while updating opp_table */
1261 WARN_ON(!list_empty(&opp_table->opp_list)); 1252 WARN_ON(!list_empty(&opp_table->opp_list));
1262 1253
1263 if (!opp_table->prop_name) {
1264 pr_err("%s: Doesn't have a prop-name\n", __func__);
1265 return;
1266 }
1267
1268 kfree(opp_table->prop_name); 1254 kfree(opp_table->prop_name);
1269 opp_table->prop_name = NULL; 1255 opp_table->prop_name = NULL;
1270 1256
@@ -1334,11 +1320,9 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
1334 goto err; 1320 goto err;
1335 } 1321 }
1336 1322
1337 /* Already have regulators set */ 1323 /* Another CPU that shares the OPP table has set the regulators ? */
1338 if (opp_table->regulators) { 1324 if (opp_table->regulators)
1339 ret = -EBUSY; 1325 return opp_table;
1340 goto err;
1341 }
1342 1326
1343 opp_table->regulators = kmalloc_array(count, 1327 opp_table->regulators = kmalloc_array(count,
1344 sizeof(*opp_table->regulators), 1328 sizeof(*opp_table->regulators),
@@ -1392,10 +1376,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
1392{ 1376{
1393 int i; 1377 int i;
1394 1378
1395 if (!opp_table->regulators) { 1379 if (!opp_table->regulators)
1396 pr_err("%s: Doesn't have regulators set\n", __func__); 1380 goto put_opp_table;
1397 return;
1398 }
1399 1381
1400 /* Make sure there are no concurrent readers while updating opp_table */ 1382 /* Make sure there are no concurrent readers while updating opp_table */
1401 WARN_ON(!list_empty(&opp_table->opp_list)); 1383 WARN_ON(!list_empty(&opp_table->opp_list));
@@ -1409,6 +1391,7 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
1409 opp_table->regulators = NULL; 1391 opp_table->regulators = NULL;
1410 opp_table->regulator_count = 0; 1392 opp_table->regulator_count = 0;
1411 1393
1394put_opp_table:
1412 dev_pm_opp_put_opp_table(opp_table); 1395 dev_pm_opp_put_opp_table(opp_table);
1413} 1396}
1414EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators); 1397EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
@@ -1494,7 +1477,6 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
1494 int (*set_opp)(struct dev_pm_set_opp_data *data)) 1477 int (*set_opp)(struct dev_pm_set_opp_data *data))
1495{ 1478{
1496 struct opp_table *opp_table; 1479 struct opp_table *opp_table;
1497 int ret;
1498 1480
1499 if (!set_opp) 1481 if (!set_opp)
1500 return ERR_PTR(-EINVAL); 1482 return ERR_PTR(-EINVAL);
@@ -1505,24 +1487,15 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
1505 1487
1506 /* This should be called before OPPs are initialized */ 1488 /* This should be called before OPPs are initialized */
1507 if (WARN_ON(!list_empty(&opp_table->opp_list))) { 1489 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1508 ret = -EBUSY; 1490 dev_pm_opp_put_opp_table(opp_table);
1509 goto err; 1491 return ERR_PTR(-EBUSY);
1510 }
1511
1512 /* Already have custom set_opp helper */
1513 if (WARN_ON(opp_table->set_opp)) {
1514 ret = -EBUSY;
1515 goto err;
1516 } 1492 }
1517 1493
1518 opp_table->set_opp = set_opp; 1494 /* Another CPU that shares the OPP table has set the helper ? */
1495 if (!opp_table->set_opp)
1496 opp_table->set_opp = set_opp;
1519 1497
1520 return opp_table; 1498 return opp_table;
1521
1522err:
1523 dev_pm_opp_put_opp_table(opp_table);
1524
1525 return ERR_PTR(ret);
1526} 1499}
1527EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper); 1500EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
1528 1501
@@ -1535,97 +1508,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
1535 */ 1508 */
1536void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) 1509void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
1537{ 1510{
1538 if (!opp_table->set_opp) {
1539 pr_err("%s: Doesn't have custom set_opp helper set\n",
1540 __func__);
1541 return;
1542 }
1543
1544 /* Make sure there are no concurrent readers while updating opp_table */ 1511 /* Make sure there are no concurrent readers while updating opp_table */
1545 WARN_ON(!list_empty(&opp_table->opp_list)); 1512 WARN_ON(!list_empty(&opp_table->opp_list));
1546 1513
1547 opp_table->set_opp = NULL; 1514 opp_table->set_opp = NULL;
1548
1549 dev_pm_opp_put_opp_table(opp_table); 1515 dev_pm_opp_put_opp_table(opp_table);
1550} 1516}
1551EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper); 1517EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
1552 1518
1553/** 1519/**
1554 * dev_pm_opp_register_get_pstate_helper() - Register get_pstate() helper.
1555 * @dev: Device for which the helper is getting registered.
1556 * @get_pstate: Helper.
1557 *
1558 * TODO: Remove this callback after the same information is available via Device
1559 * Tree.
1560 *
1561 * This allows a platform to initialize the performance states of individual
1562 * OPPs for its devices, until we get similar information directly from DT.
1563 *
1564 * This must be called before the OPPs are initialized for the device.
1565 */
1566struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
1567 int (*get_pstate)(struct device *dev, unsigned long rate))
1568{
1569 struct opp_table *opp_table;
1570 int ret;
1571
1572 if (!get_pstate)
1573 return ERR_PTR(-EINVAL);
1574
1575 opp_table = dev_pm_opp_get_opp_table(dev);
1576 if (!opp_table)
1577 return ERR_PTR(-ENOMEM);
1578
1579 /* This should be called before OPPs are initialized */
1580 if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1581 ret = -EBUSY;
1582 goto err;
1583 }
1584
1585 /* Already have genpd_performance_state set */
1586 if (WARN_ON(opp_table->genpd_performance_state)) {
1587 ret = -EBUSY;
1588 goto err;
1589 }
1590
1591 opp_table->genpd_performance_state = true;
1592 opp_table->get_pstate = get_pstate;
1593
1594 return opp_table;
1595
1596err:
1597 dev_pm_opp_put_opp_table(opp_table);
1598
1599 return ERR_PTR(ret);
1600}
1601EXPORT_SYMBOL_GPL(dev_pm_opp_register_get_pstate_helper);
1602
1603/**
1604 * dev_pm_opp_unregister_get_pstate_helper() - Releases resources blocked for
1605 * get_pstate() helper
1606 * @opp_table: OPP table returned from dev_pm_opp_register_get_pstate_helper().
1607 *
1608 * Release resources blocked for platform specific get_pstate() helper.
1609 */
1610void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table)
1611{
1612 if (!opp_table->genpd_performance_state) {
1613 pr_err("%s: Doesn't have performance states set\n",
1614 __func__);
1615 return;
1616 }
1617
1618 /* Make sure there are no concurrent readers while updating opp_table */
1619 WARN_ON(!list_empty(&opp_table->opp_list));
1620
1621 opp_table->genpd_performance_state = false;
1622 opp_table->get_pstate = NULL;
1623
1624 dev_pm_opp_put_opp_table(opp_table);
1625}
1626EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_get_pstate_helper);
1627
1628/**
1629 * dev_pm_opp_add() - Add an OPP table from a table definitions 1520 * dev_pm_opp_add() - Add an OPP table from a table definitions
1630 * @dev: device for which we do this operation 1521 * @dev: device for which we do this operation
1631 * @freq: Frequency in Hz for this OPP 1522 * @freq: Frequency in Hz for this OPP
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index b03c03576a62..e6828e5f81b0 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -77,10 +77,21 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
77{ 77{
78 struct dentry *pdentry = opp_table->dentry; 78 struct dentry *pdentry = opp_table->dentry;
79 struct dentry *d; 79 struct dentry *d;
80 unsigned long id;
80 char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */ 81 char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */
81 82
82 /* Rate is unique to each OPP, use it to give opp-name */ 83 /*
83 snprintf(name, sizeof(name), "opp:%lu", opp->rate); 84 * Get directory name for OPP.
85 *
86 * - Normally rate is unique to each OPP, use it to get unique opp-name.
87 * - For some devices rate isn't available, use index instead.
88 */
89 if (likely(opp->rate))
90 id = opp->rate;
91 else
92 id = _get_opp_count(opp_table);
93
94 snprintf(name, sizeof(name), "opp:%lu", id);
84 95
85 /* Create per-opp directory */ 96 /* Create per-opp directory */
86 d = debugfs_create_dir(name, pdentry); 97 d = debugfs_create_dir(name, pdentry);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index cb716aa2f44b..7af0ddec936b 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/of_device.h> 19#include <linux/of_device.h>
20#include <linux/pm_domain.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <linux/export.h> 22#include <linux/export.h>
22 23
@@ -250,20 +251,17 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
250 251
251/* Returns opp descriptor node for a device node, caller must 252/* Returns opp descriptor node for a device node, caller must
252 * do of_node_put() */ 253 * do of_node_put() */
253static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np) 254static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
255 int index)
254{ 256{
255 /* 257 /* "operating-points-v2" can be an array for power domain providers */
256 * There should be only ONE phandle present in "operating-points-v2" 258 return of_parse_phandle(np, "operating-points-v2", index);
257 * property.
258 */
259
260 return of_parse_phandle(np, "operating-points-v2", 0);
261} 259}
262 260
263/* Returns opp descriptor node for a device, caller must do of_node_put() */ 261/* Returns opp descriptor node for a device, caller must do of_node_put() */
264struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) 262struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
265{ 263{
266 return _opp_of_get_opp_desc_node(dev->of_node); 264 return _opp_of_get_opp_desc_node(dev->of_node, 0);
267} 265}
268EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); 266EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
269 267
@@ -289,9 +287,10 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
289 struct device_node *np) 287 struct device_node *np)
290{ 288{
291 struct dev_pm_opp *new_opp; 289 struct dev_pm_opp *new_opp;
292 u64 rate; 290 u64 rate = 0;
293 u32 val; 291 u32 val;
294 int ret; 292 int ret;
293 bool rate_not_available = false;
295 294
296 new_opp = _opp_allocate(opp_table); 295 new_opp = _opp_allocate(opp_table);
297 if (!new_opp) 296 if (!new_opp)
@@ -299,8 +298,21 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
299 298
300 ret = of_property_read_u64(np, "opp-hz", &rate); 299 ret = of_property_read_u64(np, "opp-hz", &rate);
301 if (ret < 0) { 300 if (ret < 0) {
302 dev_err(dev, "%s: opp-hz not found\n", __func__); 301 /* "opp-hz" is optional for devices like power domains. */
303 goto free_opp; 302 if (!of_find_property(dev->of_node, "#power-domain-cells",
303 NULL)) {
304 dev_err(dev, "%s: opp-hz not found\n", __func__);
305 goto free_opp;
306 }
307
308 rate_not_available = true;
309 } else {
310 /*
311 * Rate is defined as an unsigned long in clk API, and so
312 * casting explicitly to its type. Must be fixed once rate is 64
313 * bit guaranteed in clk API.
314 */
315 new_opp->rate = (unsigned long)rate;
304 } 316 }
305 317
306 /* Check if the OPP supports hardware's hierarchy of versions or not */ 318 /* Check if the OPP supports hardware's hierarchy of versions or not */
@@ -309,12 +321,6 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
309 goto free_opp; 321 goto free_opp;
310 } 322 }
311 323
312 /*
313 * Rate is defined as an unsigned long in clk API, and so casting
314 * explicitly to its type. Must be fixed once rate is 64 bit
315 * guaranteed in clk API.
316 */
317 new_opp->rate = (unsigned long)rate;
318 new_opp->turbo = of_property_read_bool(np, "turbo-mode"); 324 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
319 325
320 new_opp->np = np; 326 new_opp->np = np;
@@ -324,11 +330,13 @@ static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
324 if (!of_property_read_u32(np, "clock-latency-ns", &val)) 330 if (!of_property_read_u32(np, "clock-latency-ns", &val))
325 new_opp->clock_latency_ns = val; 331 new_opp->clock_latency_ns = val;
326 332
333 new_opp->pstate = of_genpd_opp_to_performance_state(dev, np);
334
327 ret = opp_parse_supplies(new_opp, dev, opp_table); 335 ret = opp_parse_supplies(new_opp, dev, opp_table);
328 if (ret) 336 if (ret)
329 goto free_opp; 337 goto free_opp;
330 338
331 ret = _opp_add(dev, new_opp, opp_table); 339 ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
332 if (ret) { 340 if (ret) {
333 /* Don't return error for duplicate OPPs */ 341 /* Don't return error for duplicate OPPs */
334 if (ret == -EBUSY) 342 if (ret == -EBUSY)
@@ -374,7 +382,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
374{ 382{
375 struct device_node *np; 383 struct device_node *np;
376 struct opp_table *opp_table; 384 struct opp_table *opp_table;
377 int ret = 0, count = 0; 385 int ret = 0, count = 0, pstate_count = 0;
386 struct dev_pm_opp *opp;
378 387
379 opp_table = _managed_opp(opp_np); 388 opp_table = _managed_opp(opp_np);
380 if (opp_table) { 389 if (opp_table) {
@@ -408,6 +417,20 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
408 goto put_opp_table; 417 goto put_opp_table;
409 } 418 }
410 419
420 list_for_each_entry(opp, &opp_table->opp_list, node)
421 pstate_count += !!opp->pstate;
422
423 /* Either all or none of the nodes shall have performance state set */
424 if (pstate_count && pstate_count != count) {
425 dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
426 count, pstate_count);
427 ret = -ENOENT;
428 goto put_opp_table;
429 }
430
431 if (pstate_count)
432 opp_table->genpd_performance_state = true;
433
411 opp_table->np = opp_np; 434 opp_table->np = opp_np;
412 if (of_property_read_bool(opp_np, "opp-shared")) 435 if (of_property_read_bool(opp_np, "opp-shared"))
413 opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; 436 opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
@@ -509,6 +532,54 @@ int dev_pm_opp_of_add_table(struct device *dev)
509} 532}
510EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); 533EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
511 534
535/**
536 * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
537 * @dev: device pointer used to lookup OPP table.
538 * @index: Index number.
539 *
540 * Register the initial OPP table with the OPP library for given device only
541 * using the "operating-points-v2" property.
542 *
543 * Return:
544 * 0 On success OR
545 * Duplicate OPPs (both freq and volt are same) and opp->available
546 * -EEXIST Freq are same and volt are different OR
547 * Duplicate OPPs (both freq and volt are same) and !opp->available
548 * -ENOMEM Memory allocation failure
549 * -ENODEV when 'operating-points' property is not found or is invalid data
550 * in device node.
551 * -ENODATA when empty 'operating-points' property is found
552 * -EINVAL when invalid entries are found in opp-v2 table
553 */
554int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
555{
556 struct device_node *opp_np;
557 int ret, count;
558
559again:
560 opp_np = _opp_of_get_opp_desc_node(dev->of_node, index);
561 if (!opp_np) {
562 /*
563 * If only one phandle is present, then the same OPP table
564 * applies for all index requests.
565 */
566 count = of_count_phandle_with_args(dev->of_node,
567 "operating-points-v2", NULL);
568 if (count == 1 && index) {
569 index = 0;
570 goto again;
571 }
572
573 return -ENODEV;
574 }
575
576 ret = _of_add_opp_table_v2(dev, opp_np);
577 of_node_put(opp_np);
578
579 return ret;
580}
581EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
582
512/* CPU device specific helpers */ 583/* CPU device specific helpers */
513 584
514/** 585/**
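
Annotation: dev_pm_opp_of_add_table_indexed() retries with index 0 when the node carries a single operating-points-v2 phandle, so providers that share one table across all indices need no special-casing. A hypothetical multi-domain caller:

    /* Hypothetical genpd provider registering one OPP table per domain. */
    for (i = 0; i < num_domains; i++) {
            ret = dev_pm_opp_of_add_table_indexed(dev, i);
            if (ret)
                    dev_err(dev, "domain %d: no OPP table (%d)\n", i, ret);
    }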
@@ -613,7 +684,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
613 } 684 }
614 685
615 /* Get OPP descriptor node */ 686 /* Get OPP descriptor node */
616 tmp_np = _opp_of_get_opp_desc_node(cpu_np); 687 tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
617 of_node_put(cpu_np); 688 of_node_put(cpu_np);
618 if (!tmp_np) { 689 if (!tmp_np) {
619 pr_err("%pOF: Couldn't find opp node\n", cpu_np); 690 pr_err("%pOF: Couldn't find opp node\n", cpu_np);
@@ -633,3 +704,76 @@ put_cpu_node:
633 return ret; 704 return ret;
634} 705}
635EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); 706EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
707
708/**
709 * of_dev_pm_opp_find_required_opp() - Search for required OPP.
710 * @dev: The device whose OPP node is referenced by the 'np' DT node.
711 * @np: Node that contains the "required-opps" property.
712 *
713 * Returns the OPP of the device 'dev', whose phandle is present in the "np"
714 * node. Although the "required-opps" property supports having multiple
715 * phandles, this helper routine only parses the very first phandle in the list.
716 *
717 * Return: Matching opp, else returns ERR_PTR in case of error and should be
718 * handled using IS_ERR.
719 *
720 * The callers are required to call dev_pm_opp_put() for the returned OPP after
721 * use.
722 */
723struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev,
724 struct device_node *np)
725{
726 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ENODEV);
727 struct device_node *required_np;
728 struct opp_table *opp_table;
729
730 opp_table = _find_opp_table(dev);
731 if (IS_ERR(opp_table))
732 return ERR_CAST(opp_table);
733
734 required_np = of_parse_phandle(np, "required-opps", 0);
735 if (unlikely(!required_np)) {
736 dev_err(dev, "Unable to parse required-opps\n");
737 goto put_opp_table;
738 }
739
740 mutex_lock(&opp_table->lock);
741
742 list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
743 if (temp_opp->available && temp_opp->np == required_np) {
744 opp = temp_opp;
745
746 /* Increment the reference count of OPP */
747 dev_pm_opp_get(opp);
748 break;
749 }
750 }
751
752 mutex_unlock(&opp_table->lock);
753
754 of_node_put(required_np);
755put_opp_table:
756 dev_pm_opp_put_opp_table(opp_table);
757
758 return opp;
759}
760EXPORT_SYMBOL_GPL(of_dev_pm_opp_find_required_opp);
761
762/**
763 * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
764 * @opp: opp for which DT node has to be returned for
765 *
766 * Return: DT node corresponding to the opp, else 0 on success.
767 *
768 * The caller needs to put the node with of_node_put() after using it.
769 */
770struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
771{
772 if (IS_ERR_OR_NULL(opp)) {
773 pr_err("%s: Invalid parameters\n", __func__);
774 return NULL;
775 }
776
777 return of_node_get(opp->np);
778}
779EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
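
Annotation: of_dev_pm_opp_find_required_opp() hands back a referenced OPP, so every successful lookup must be balanced with dev_pm_opp_put(). A hypothetical consumer mapping a client node's required-opps phandle to the domain's OPP:

    opp = of_dev_pm_opp_find_required_opp(genpd_dev, client_np);
    if (!IS_ERR(opp)) {
            /* ... read the performance state off the OPP ... */
            dev_pm_opp_put(opp);    /* balance the reference taken above */
    }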
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 4d00061648a3..7c540fd063b2 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -140,7 +140,6 @@ enum opp_table_access {
140 * @genpd_performance_state: Device's power domain support performance state. 140 * @genpd_performance_state: Device's power domain support performance state.
141 * @set_opp: Platform specific set_opp callback 141 * @set_opp: Platform specific set_opp callback
142 * @set_opp_data: Data to be passed to set_opp callback 142 * @set_opp_data: Data to be passed to set_opp callback
143 * @get_pstate: Platform specific get_pstate callback
144 * @dentry: debugfs dentry pointer of the real device directory (not links). 143 * @dentry: debugfs dentry pointer of the real device directory (not links).
145 * @dentry_name: Name of the real dentry. 144 * @dentry_name: Name of the real dentry.
146 * 145 *
@@ -178,7 +177,6 @@ struct opp_table {
178 177
179 int (*set_opp)(struct dev_pm_set_opp_data *data); 178 int (*set_opp)(struct dev_pm_set_opp_data *data);
180 struct dev_pm_set_opp_data *set_opp_data; 179 struct dev_pm_set_opp_data *set_opp_data;
181 int (*get_pstate)(struct device *dev, unsigned long rate);
182 180
183#ifdef CONFIG_DEBUG_FS 181#ifdef CONFIG_DEBUG_FS
184 struct dentry *dentry; 182 struct dentry *dentry;
@@ -187,14 +185,16 @@ struct opp_table {
187}; 185};
188 186
189/* Routines internal to opp core */ 187/* Routines internal to opp core */
188void dev_pm_opp_get(struct dev_pm_opp *opp);
190void _get_opp_table_kref(struct opp_table *opp_table); 189void _get_opp_table_kref(struct opp_table *opp_table);
190int _get_opp_count(struct opp_table *opp_table);
191struct opp_table *_find_opp_table(struct device *dev); 191struct opp_table *_find_opp_table(struct device *dev);
192struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); 192struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
193void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all); 193void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all);
194void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all); 194void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all);
195struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table); 195struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
196void _opp_free(struct dev_pm_opp *opp); 196void _opp_free(struct dev_pm_opp *opp);
197int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table); 197int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
198int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic); 198int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
199void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of); 199void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
200struct opp_table *_add_opp_table(struct device *dev); 200struct opp_table *_add_opp_table(struct device *dev);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 126cf19e869b..297599fcbc32 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1195,7 +1195,7 @@ void * ccio_get_iommu(const struct parisc_device *dev)
1195 * to/from certain pages. To avoid this happening, we mark these pages 1195 * to/from certain pages. To avoid this happening, we mark these pages
1196 * as `used', and ensure that nothing will try to allocate from them. 1196 * as `used', and ensure that nothing will try to allocate from them.
1197 */ 1197 */
1198void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp) 1198void __init ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
1199{ 1199{
1200 unsigned int idx; 1200 unsigned int idx;
1201 struct parisc_device *dev = parisc_parent(cujo); 1201 struct parisc_device *dev = parisc_parent(cujo);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index e7bbdf947bbc..8350ca2311c7 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -91,6 +91,8 @@ static int send_command(struct cros_ec_device *ec_dev,
91 usleep_range(10000, 11000); 91 usleep_range(10000, 11000);
92 92
93 ret = (*xfer_fxn)(ec_dev, status_msg); 93 ret = (*xfer_fxn)(ec_dev, status_msg);
94 if (ret == -EAGAIN)
95 continue;
94 if (ret < 0) 96 if (ret < 0)
95 break; 97 break;
96 98
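
Annotation: an -EAGAIN from the transfer hook now re-enters the status-poll loop instead of aborting the command, treating it as a transient bus condition rather than a hard failure. The surrounding loop, reconstructed schematically from this hunk (retry bound illustrative):

    for (i = 0; i < EC_COMMAND_RETRIES; i++) {
            usleep_range(10000, 11000);
            ret = (*xfer_fxn)(ec_dev, status_msg);
            if (ret == -EAGAIN)
                    continue;       /* transient: poll the EC again */
            if (ret < 0)
                    break;          /* hard error: give up          */
            /* ... otherwise inspect status_msg and stop once the EC
             *     reports the command is no longer in progress ... */
    }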
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index bc309c5327ff..566644bb496a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -168,8 +168,8 @@ config DELL_WMI
168 depends on DMI 168 depends on DMI
169 depends on INPUT 169 depends on INPUT
170 depends on ACPI_VIDEO || ACPI_VIDEO = n 170 depends on ACPI_VIDEO || ACPI_VIDEO = n
171 depends on DELL_SMBIOS
171 select DELL_WMI_DESCRIPTOR 172 select DELL_WMI_DESCRIPTOR
172 select DELL_SMBIOS
173 select INPUT_SPARSEKMAP 173 select INPUT_SPARSEKMAP
174 ---help--- 174 ---help---
175 Say Y here if you want to support WMI-based hotkeys on Dell laptops. 175 Say Y here if you want to support WMI-based hotkeys on Dell laptops.
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a32c5c00e0e7..ffffb9909ae1 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -163,6 +163,16 @@ MODULE_LICENSE("GPL");
163 163
164static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL }; 164static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
165 165
166static bool ashs_present(void)
167{
168 int i = 0;
169 while (ashs_ids[i]) {
170 if (acpi_dev_found(ashs_ids[i++]))
171 return true;
172 }
173 return false;
174}
175
166struct bios_args { 176struct bios_args {
167 u32 arg0; 177 u32 arg0;
168 u32 arg1; 178 u32 arg1;
@@ -1025,6 +1035,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
1025 1035
1026static void asus_wmi_rfkill_exit(struct asus_wmi *asus) 1036static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
1027{ 1037{
1038 if (asus->driver->wlan_ctrl_by_user && ashs_present())
1039 return;
1040
1028 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); 1041 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
1029 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); 1042 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
1030 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); 1043 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
@@ -2121,16 +2134,6 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
2121 return 0; 2134 return 0;
2122} 2135}
2123 2136
2124static bool ashs_present(void)
2125{
2126 int i = 0;
2127 while (ashs_ids[i]) {
2128 if (acpi_dev_found(ashs_ids[i++]))
2129 return true;
2130 }
2131 return false;
2132}
2133
2134/* 2137/*
2135 * WMI Driver 2138 * WMI Driver
2136 */ 2139 */
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 360e06b20c53..ac18f2f27881 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -110,7 +110,7 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
110 UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */ 110 UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
111 UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */ 111 UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */
112 UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */ 112 UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */
113 UNIPHIER_RESETX(12, 0x200c, 5), /* GIO (PCIe, USB3) */ 113 UNIPHIER_RESETX(14, 0x200c, 5), /* USB30 */
114 UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */ 114 UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
115 UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */ 115 UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */
116 UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */ 116 UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */
@@ -127,8 +127,8 @@ static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = {
127 UNIPHIER_RESETX(6, 0x200c, 9), /* Ether0 */ 127 UNIPHIER_RESETX(6, 0x200c, 9), /* Ether0 */
128 UNIPHIER_RESETX(7, 0x200c, 10), /* Ether1 */ 128 UNIPHIER_RESETX(7, 0x200c, 10), /* Ether1 */
129 UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */ 129 UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */
130 UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link (GIO0) */ 130 UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link */
131 UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link (GIO1) */ 131 UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link */
132 UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */ 132 UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */
133 UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */ 133 UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */
134 UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */ 134 UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 04143c08bd6e..02c03e418c27 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3034,7 +3034,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
3034 cqr->callback_data = req; 3034 cqr->callback_data = req;
3035 cqr->status = DASD_CQR_FILLED; 3035 cqr->status = DASD_CQR_FILLED;
3036 cqr->dq = dq; 3036 cqr->dq = dq;
3037 req->completion_data = cqr; 3037 *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
3038
3038 blk_mq_start_request(req); 3039 blk_mq_start_request(req);
3039 spin_lock(&block->queue_lock); 3040 spin_lock(&block->queue_lock);
3040 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3041 list_add_tail(&cqr->blocklist, &block->ccw_queue);
@@ -3058,12 +3059,13 @@ out:
3058 */ 3059 */
3059enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3060enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
3060{ 3061{
3061 struct dasd_ccw_req *cqr = req->completion_data;
3062 struct dasd_block *block = req->q->queuedata; 3062 struct dasd_block *block = req->q->queuedata;
3063 struct dasd_device *device; 3063 struct dasd_device *device;
3064 struct dasd_ccw_req *cqr;
3064 unsigned long flags; 3065 unsigned long flags;
3065 int rc = 0; 3066 int rc = 0;
3066 3067
3068 cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
3067 if (!cqr) 3069 if (!cqr)
3068 return BLK_EH_NOT_HANDLED; 3070 return BLK_EH_NOT_HANDLED;
3069 3071
@@ -3169,6 +3171,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
3169 int rc; 3171 int rc;
3170 3172
3171 block->tag_set.ops = &dasd_mq_ops; 3173 block->tag_set.ops = &dasd_mq_ops;
3174 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
3172 block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; 3175 block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
3173 block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; 3176 block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
3174 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3177 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
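
Annotation: req->completion_data shares storage with other request fields inside struct request, so a cqr stashed there can be clobbered between submission and the timeout handler. The blk-mq per-request PDU — sized via tag_set.cmd_size and located with blk_mq_rq_to_pdu() — is private to the driver and valid for the request's whole lifetime. The pattern in isolation:

    block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);   /* setup */

    *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;   /* store */

    cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));   /* fetch */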
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 439991d71b14..4c14ce428e92 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
141 int i; 141 int i;
142 142
143 for (i = 0; i < nr_queues; i++) { 143 for (i = 0; i < nr_queues; i++) {
144 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); 144 q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
145 if (!q) 145 if (!q)
146 return -ENOMEM; 146 return -ENOMEM;
147 147
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
456{ 456{
457 struct ciw *ciw; 457 struct ciw *ciw;
458 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data; 458 struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
459 int rc;
460 459
461 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib)); 460 memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
462 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag)); 461 memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
493 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); 492 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
494 if (!ciw) { 493 if (!ciw) {
495 DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); 494 DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
496 rc = -EINVAL; 495 return -EINVAL;
497 goto out_err;
498 } 496 }
499 irq_ptr->equeue = *ciw; 497 irq_ptr->equeue = *ciw;
500 498
501 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); 499 ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
502 if (!ciw) { 500 if (!ciw) {
503 DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); 501 DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
504 rc = -EINVAL; 502 return -EINVAL;
505 goto out_err;
506 } 503 }
507 irq_ptr->aqueue = *ciw; 504 irq_ptr->aqueue = *ciw;
508 505
@@ -512,9 +509,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
512 init_data->cdev->handler = qdio_int_handler; 509 init_data->cdev->handler = qdio_int_handler;
513 spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev)); 510 spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
514 return 0; 511 return 0;
515out_err:
516 qdio_release_memory(irq_ptr);
517 return rc;
518} 512}
519 513
520void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, 514void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
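
Annotation: qdio_setup_irq() no longer frees irq_ptr memory on its error paths; it just returns the error. This reads like an ownership fix — cleanup stays with the caller that allocated the memory, so a failure here cannot lead to the same memory being released twice. Schematically, assuming the caller's existing error path:

    /* Single-owner cleanup: the caller both allocates and releases. */
    rc = qdio_setup_irq(init_data);
    if (rc)
            goto err_setup;         /* caller's error path frees irq_ptr */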
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 2c7550797ec2..dce92b2a895d 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp)
715 * and stores the result to ccwchain list. @cp must have been 715 * and stores the result to ccwchain list. @cp must have been
716 * initialized by a previous call with cp_init(). Otherwise, undefined 716 * initialized by a previous call with cp_init(). Otherwise, undefined
717 * behavior occurs. 717 * behavior occurs.
718 * For each chain composing the channel program:
719 * - On entry ch_len holds the count of CCWs to be translated.
720 * - On exit ch_len is adjusted to the count of successfully translated CCWs.
721 * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
718 * 722 *
719 * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced 723 * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
720 * as helpers to do ccw chain translation inside the kernel. Basically 724 * as helpers to do ccw chain translation inside the kernel. Basically
@@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp)
749 for (idx = 0; idx < len; idx++) { 753 for (idx = 0; idx < len; idx++) {
750 ret = ccwchain_fetch_one(chain, idx, cp); 754 ret = ccwchain_fetch_one(chain, idx, cp);
751 if (ret) 755 if (ret)
752 return ret; 756 goto out_err;
753 } 757 }
754 } 758 }
755 759
756 return 0; 760 return 0;
761out_err:
762 /* Only cleanup the chain elements that were actually translated. */
763 chain->ch_len = idx;
764 list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
765 chain->ch_len = 0;
766 }
767 return ret;
757} 768}
758 769
759/** 770/**
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a8b831000b2d..18c4f933e8b9 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -4,7 +4,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+/**
+ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock must not be held.
+ */
+void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+			    struct zfcp_port *port, struct scsi_device *sdev,
+			    u8 want, u8 need)
+{
+	unsigned long flags;
+
+	read_lock_irqsave(&adapter->erp_lock, flags);
+	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
 
 /**
  * zfcp_dbf_rec_run_lvl - trace event related to running recovery
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bf8ea4df2bb8..e5eed8aac0ce 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -4,7 +4,7 @@
  *
  * External function declarations.
  *
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #ifndef ZFCP_EXT_H
@@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
 extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
 			      struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+				   struct zfcp_port *port,
+				   struct scsi_device *sdev, u8 want, u8 need);
 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
 extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
 				 struct zfcp_erp_action *erp);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 4d2ba5682493..22f9562f415c 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -4,7 +4,7 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
 	ids.port_id = port->d_id;
 	ids.roles = FC_RPORT_ROLE_FCP_TARGET;
 
-	zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
-			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
-			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+	zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
+			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
 	rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
 	if (!rport) {
 		dev_err(&port->adapter->ccw_device->dev,
@@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
 	struct fc_rport *rport = port->rport;
 
 	if (rport) {
-		zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
-				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
-				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+		zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
+				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
 		fc_remote_port_delete(rport);
 		port->rport = NULL;
 	}
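The new zfcp_dbf_rec_trig_lock() is the usual locked-wrapper convention: the bare function documents that the caller already holds adapter->erp_lock, while the _lock variant takes the reader side itself for call sites such as the rport helpers above. A sketch of that convention with a pthreads rwlock (trace_trigger()/trace_trigger_lock() are illustrative names; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t erp_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Caller must hold erp_lock (mirrors zfcp_dbf_rec_trig). */
static void trace_trigger(const char *tag) {
    printf("trace: %s\n", tag);
}

/* Wrapper for callers that do not hold the lock
 * (mirrors the added zfcp_dbf_rec_trig_lock). */
static void trace_trigger_lock(const char *tag) {
    pthread_rwlock_rdlock(&erp_lock);
    trace_trigger(tag);
    pthread_rwlock_unlock(&erp_lock);
}

int main(void) {
    trace_trigger_lock("scpaddy");
    return 0;
}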
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e29f9b8fd66d..56c940394729 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -182,7 +182,7 @@ zalon7xx-objs := zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
 
 # Files generated that shall be removed upon make clean
-clean-files :=	53c700_d.h 53c700_u.h
+clean-files :=	53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
 
 $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
 
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0156c9623c35..d62ddd63f4fe 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -724,6 +724,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 	int wait;
 	unsigned long flags = 0;
 	unsigned long mflags = 0;
+	struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
+			fibptr->hw_fib_va;
 
 	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
 	if (callback) {
@@ -734,11 +736,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 	wait = 1;
 
 
-	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
-		struct aac_hba_cmd_req *hbacmd =
-			(struct aac_hba_cmd_req *)fibptr->hw_fib_va;
+	hbacmd->iu_type = command;
 
-		hbacmd->iu_type = command;
+	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
 		/* bit1 of request_id must be 0 */
 		hbacmd->request_id =
 			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c198b96368dd..5c40d809830f 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1894,7 +1894,7 @@ retry:
 		num = (rem_sz > scatter_elem_sz_prev) ?
 			scatter_elem_sz_prev : rem_sz;
 
-		schp->pages[k] = alloc_pages(gfp_mask, order);
+		schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
 		if (!schp->pages[k])
 			goto out;
 
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 2a21f2d48592..35fab1e18adc 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
 	struct scsi_device *SDev;
 	struct scsi_sense_hdr sshdr;
 	int result, err = 0, retries = 0;
+	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
 
 	SDev = cd->device;
 
+	if (cgc->sense)
+		senseptr = sense_buffer;
+
       retry:
 	if (!scsi_block_when_processing_errors(SDev)) {
 		err = -ENODEV;
@@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
 	}
 
 	result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
-			      cgc->buffer, cgc->buflen,
-			      (unsigned char *)cgc->sense, &sshdr,
+			      cgc->buffer, cgc->buflen, senseptr, &sshdr,
 			      cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
 
+	if (cgc->sense)
+		memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
+
 	/* Minimal error checking. Ignore cases we know about, and report the rest. */
 	if (driver_byte(result) != 0) {
 		switch (sshdr.sense_key) {
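The sr_do_ioctl() change swaps the caller-supplied sense pointer for a full-sized local buffer and copies back only the bytes the caller's structure can hold, so the lower layer always sees a SCSI_SENSE_BUFFERSIZE buffer regardless of what the ioctl user passed in. A compact sketch of that bounce-buffer pattern, with made-up sizes and a stand-in execute():

#include <string.h>
#include <stdio.h>

#define SENSE_BUF_LEN 96           /* full size the lower layer expects */
struct req { unsigned char sense[18]; };  /* caller's smaller layout */

/* stand-in for the transport filling sense data */
static void execute(unsigned char *sense) {
    if (sense)
        memset(sense, 0x70, SENSE_BUF_LEN);
}

static void do_ioctl(struct req *r, int want_sense) {
    unsigned char buf[SENSE_BUF_LEN], *senseptr = NULL;

    if (want_sense)
        senseptr = buf;            /* local, correctly sized buffer */
    execute(senseptr);
    if (want_sense)                /* copy back only what the caller holds */
        memcpy(r->sense, buf, sizeof(r->sense));
}

int main(void) {
    struct req r;
    do_ioctl(&r, 1);
    printf("sense[0]=%#x\n", r.sense[0]);
    return 0;
}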
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c374e3b5c678..777e5f1e52d1 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
 		break;
 
 	case BTSTAT_ABORTQUEUE:
-		cmd->result = (DID_ABORT << 16);
+		cmd->result = (DID_BUS_BUSY << 16);
 		break;
 
 	case BTSTAT_SCSIPARITY:
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index d9fcdb592b39..3e3d12ce4587 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -559,22 +559,28 @@ EXPORT_SYMBOL(tegra_powergate_remove_clamping);
 int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
 				      struct reset_control *rst)
 {
-	struct tegra_powergate pg;
+	struct tegra_powergate *pg;
 	int err;
 
 	if (!tegra_powergate_is_available(id))
 		return -EINVAL;
 
-	pg.id = id;
-	pg.clks = &clk;
-	pg.num_clks = 1;
-	pg.reset = rst;
-	pg.pmc = pmc;
+	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
+	if (!pg)
+		return -ENOMEM;
 
-	err = tegra_powergate_power_up(&pg, false);
+	pg->id = id;
+	pg->clks = &clk;
+	pg->num_clks = 1;
+	pg->reset = rst;
+	pg->pmc = pmc;
+
+	err = tegra_powergate_power_up(pg, false);
 	if (err)
 		pr_err("failed to turn on partition %d: %d\n", id, err);
 
+	kfree(pg);
+
 	return err;
 }
 EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
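tegra_powergate_sequence_power_up() now builds its temporary struct tegra_powergate on the heap rather than the stack, keeping a potentially large helper object off the limited kernel stack and making the release explicit on every exit path. The shape of the pattern, with an illustrative struct and power_up():

#include <stdlib.h>
#include <stdio.h>

struct powergate { char state[512]; };  /* illustrative: too big for stack */

/* Allocate a short-lived helper object on the heap instead of the stack
 * (the pattern of the pmc.c hunk above); free it before returning. */
static int power_up(unsigned int id) {
    struct powergate *pg = calloc(1, sizeof(*pg));
    int err = 0;

    if (!pg)
        return -1;
    snprintf(pg->state, sizeof(pg->state), "partition %u", id);
    /* ... hardware sequence would run here ... */
    free(pg);
    return err;
}

int main(void) { return power_up(3); }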
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index d5f3a70c06b0..283b2832728e 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -83,17 +83,16 @@ static int sdw_drv_probe(struct device *dev)
 	 * attach to power domain but don't turn on (last arg)
 	 */
 	ret = dev_pm_domain_attach(dev, false);
-	if (ret != -EPROBE_DEFER) {
-		ret = drv->probe(slave, id);
-		if (ret) {
-			dev_err(dev, "Probe of %s failed: %d\n", drv->name, ret);
-			dev_pm_domain_detach(dev, false);
-		}
-	}
-
 	if (ret)
 		return ret;
 
+	ret = drv->probe(slave, id);
+	if (ret) {
+		dev_err(dev, "Probe of %s failed: %d\n", drv->name, ret);
+		dev_pm_domain_detach(dev, false);
+		return ret;
+	}
+
 	/* device is probed so let's read the properties now */
 	if (slave->ops && slave->ops->read_prop)
 		slave->ops->read_prop(slave);
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 1596d35498c5..6573152ce893 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
 
 static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
 {
-	if (!has_bspi(qspi) || (qspi->bspi_enabled))
+	if (!has_bspi(qspi))
 		return;
 
 	qspi->bspi_enabled = 1;
@@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
 
 static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
 {
-	if (!has_bspi(qspi) || (!qspi->bspi_enabled))
+	if (!has_bspi(qspi))
 		return;
 
 	qspi->bspi_enabled = 0;
@@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
 
 static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
 {
-	u32 data = 0;
+	u32 rd = 0;
+	u32 wr = 0;
 
-	if (qspi->curr_cs == cs)
-		return;
 	if (qspi->base[CHIP_SELECT]) {
-		data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
-		data = (data & ~0xff) | (1 << cs);
-		bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
+		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+		wr = (rd & ~0xff) | (1 << cs);
+		if (rd == wr)
+			return;
+		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
 		usleep_range(10, 20);
 	}
+
+	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
 	qspi->curr_cs = cs;
 }
 
@@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
 			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
 		}
 		mspi_cdram = MSPI_CDRAM_CONT_BIT;
-		mspi_cdram |= (~(1 << spi->chip_select) &
-			       MSPI_CDRAM_PCS);
+
+		if (has_bspi(qspi))
+			mspi_cdram &= ~1;
+		else
+			mspi_cdram |= (~(1 << spi->chip_select) &
+				       MSPI_CDRAM_PCS);
+
 		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
 			       MSPI_CDRAM_BITSE_BIT);
 
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 1431cb98fe40..3094d818cf06 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
 	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
 	irqreturn_t ret = IRQ_NONE;
 
+	/* IRQ may be shared, so return if our interrupts are disabled */
+	if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
+	      (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
+		return ret;
+
 	/* check if we have data to read */
 	while (bs->rx_len &&
 	       (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
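The bcm2835aux hunk adds the standard shared-IRQ guard: before touching any state, the handler checks whether its own enabled interrupt sources are actually pending and returns IRQ_NONE otherwise, so other handlers sharing the line get their turn. Schematically (the constants and read_status() are placeholders, not the driver's registers):

#include <stdio.h>

#define IRQ_NONE     0
#define IRQ_HANDLED  1
#define STAT_PENDING 0x1

static unsigned int read_status(void) { return 0; }  /* nothing pending */

/* On a shared line the handler must first check whether its own device
 * raised the interrupt and bail out with IRQ_NONE if not. */
static int spi_interrupt(void) {
    if (!(read_status() & STAT_PENDING))
        return IRQ_NONE;
    /* ... drain FIFOs ... */
    return IRQ_HANDLED;
}

int main(void) {
    printf("irq -> %d\n", spi_interrupt());
    return 0;
}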
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 5c9516ae4942..4a001634023e 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
 
 	while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
 	       (xspi->tx_bytes > 0)) {
+
+		/* When xspi in busy condition, bytes may send failed,
+		 * then spi control did't work thoroughly, add one byte delay
+		 */
+		if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
+		    CDNS_SPI_IXR_TXFULL)
+			usleep_range(10, 20);
+
 		if (xspi->txbuf)
 			cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
 		else
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 6f57592a7f95..a056ee88a960 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1701,7 +1701,7 @@ static struct platform_driver spi_imx_driver = {
 };
 module_platform_driver(spi_imx_driver);
 
-MODULE_DESCRIPTION("SPI Master Controller driver");
+MODULE_DESCRIPTION("SPI Controller driver");
 MODULE_AUTHOR("Sascha Hauer, Pengutronix");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 513ec6c6e25b..0ae7defd3492 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@ struct driver_data {
 
 	/* SSP register addresses */
 	void __iomem *ioaddr;
-	u32 ssdr_physical;
+	phys_addr_t ssdr_physical;
 
 	/* SSP masks*/
 	u32 dma_cr1;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index ae086aab57d5..8171eedbfc90 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -283,6 +283,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
 	}
 
 	k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
+	brps = min_t(int, brps, 32);
 
 	scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
 	sh_msiof_write(p, TSCR, scr);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 7b213faa0a2b..eeab67f50580 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -356,11 +356,12 @@ static int spi_drv_probe(struct device *dev)
 	}
 
 	ret = dev_pm_domain_attach(dev, true);
-	if (ret != -EPROBE_DEFER) {
-		ret = sdrv->probe(spi);
-		if (ret)
-			dev_pm_domain_detach(dev, true);
-	}
+	if (ret)
+		return ret;
+
+	ret = sdrv->probe(spi);
+	if (ret)
+		dev_pm_domain_detach(dev, true);
 
 	return ret;
 }
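Both the soundwire and the spi probe hunks drop the -EPROBE_DEFER special case: dev_pm_domain_attach() is expected to return 0 or a real error here, so the bus probe simply propagates it and only detaches the PM domain after a failed driver probe. The resulting control flow, reduced to stand-in functions:

#include <errno.h>
#include <stdio.h>

/* stand-ins for the bus/PM-domain layer */
static int pm_domain_attach(void) { return 0; }   /* 0 or -errno */
static void pm_domain_detach(void) { }
static int driver_probe(void) { return -EIO; }    /* hypothetical failure */

/* Propagate attach errors directly; detach only after a failed probe. */
static int bus_probe(void) {
    int ret = pm_domain_attach();
    if (ret)
        return ret;

    ret = driver_probe();
    if (ret)
        pm_domain_detach();
    return ret;
}

int main(void) {
    printf("probe -> %d\n", bus_probe());
    return 0;
}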
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 9371651d8017..c574dd210500 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -117,7 +117,7 @@ config SSB_SERIAL
 
 config SSB_DRIVER_PCICORE_POSSIBLE
 	bool
-	depends on SSB_PCIHOST && SSB = y
+	depends on SSB_PCIHOST
 	default y
 
 config SSB_DRIVER_PCICORE
@@ -131,7 +131,7 @@ config SSB_DRIVER_PCICORE
 
 config SSB_PCICORE_HOSTMODE
 	bool "Hostmode support for SSB PCI core"
-	depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS
+	depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && SSB = y
 	help
 	  PCIcore hostmode operation (external PCI bus).
 
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
index ad049e6f24e4..f3b1ad4bd3dc 100644
--- a/drivers/staging/lustre/lnet/Kconfig
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -34,7 +34,7 @@ config LNET_SELFTEST
 
 config LNET_XPRT_IB
 	tristate "LNET infiniband support"
-	depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
+	depends on LNET && PCI && INFINIBAND_ADDR_TRANS
 	default LNET && INFINIBAND
 	help
 	  This option allows the LNET users to use infiniband as an
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4ad89ea71a70..4f26bdc3d1dc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -2121,6 +2121,8 @@ static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
 
 	if (val >= 0) {
 		udev->qfull_time_out = val * MSEC_PER_SEC;
+	} else if (val == -1) {
+		udev->qfull_time_out = val;
 	} else {
 		printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
 		return -EINVAL;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 0124a91c8d71..dd46b758852a 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -238,6 +238,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
 			if (IS_ERR(shm))
 				return PTR_ERR(shm);
 
+			/*
+			 * Ensure offset + size does not overflow offset
+			 * and does not overflow the size of the referred
+			 * shared memory object.
+			 */
+			if ((ip.a + ip.b) < ip.a ||
+			    (ip.a + ip.b) > shm->size) {
+				tee_shm_put(shm);
+				return -EINVAL;
+			}
+
 			params[n].u.memref.shm_offs = ip.a;
 			params[n].u.memref.size = ip.b;
 			params[n].u.memref.shm = shm;
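The params_from_user() guard is the canonical unsigned-overflow check: for offset a and size b, a + b < a detects wrap-around, and a + b > shm->size rejects references past the end of the shared-memory object. A self-contained version of the predicate (the names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Reject a memref whose offset + size wraps or exceeds the object,
 * the same guard the tee_core.c hunk adds. */
static int memref_ok(uint64_t offs, uint64_t size, uint64_t shm_size) {
    if (offs + size < offs)         /* unsigned wrap-around */
        return 0;
    if (offs + size > shm_size)     /* past the end of the object */
        return 0;
    return 1;
}

int main(void) {
    printf("%d\n", memref_ok(16, 16, 64));              /* 1: fits */
    printf("%d\n", memref_ok(UINT64_MAX - 8, 16, 64));  /* 0: wraps */
    printf("%d\n", memref_ok(60, 16, 64));              /* 0: too big */
    return 0;
}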
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 556960a1bab3..07d3be6f0780 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -360,9 +360,10 @@ int tee_shm_get_fd(struct tee_shm *shm)
 	if (!(shm->flags & TEE_SHM_DMA_BUF))
 		return -EINVAL;
 
+	get_dma_buf(shm->dmabuf);
 	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
-	if (fd >= 0)
-		get_dma_buf(shm->dmabuf);
+	if (fd < 0)
+		dma_buf_put(shm->dmabuf);
 	return fd;
 }
 
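The tee_shm_get_fd() fix reorders reference counting around fd publication: the extra reference is taken before dma_buf_fd() can make the buffer reachable through the new descriptor, and is dropped only on failure. The old order could race against another thread closing the fd, and with it the last reference, before the refcount bump happened. The ordering in miniature, with a C11 atomic as the refcount (publish() and the values are stand-ins):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refs = 1;

static int publish(void) { return 3; }  /* stand-in: exposes the object */

/* Take the reference *before* the object becomes reachable through the
 * new handle; only roll back on failure (mirrors the tee_shm.c fix). */
static int get_handle(void) {
    atomic_fetch_add(&refs, 1);
    int fd = publish();
    if (fd < 0)
        atomic_fetch_sub(&refs, 1);
    return fd;
}

int main(void) {
    printf("fd=%d refs=%d\n", get_handle(), atomic_load(&refs));
    return 0;
}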
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 72ebbc908e19..32cd52ca8318 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -354,7 +354,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
 
 	slot_id = 0;
 	for (i = 0; i < MAX_HC_SLOTS; i++) {
-		if (!xhci->devs[i])
+		if (!xhci->devs[i] || !xhci->devs[i]->udev)
 			continue;
 		speed = xhci->devs[i]->udev->speed;
 		if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index e7f99d55922a..15a42cee0a9c 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2524,8 +2524,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
 {
 	struct musb *musb = hcd_to_musb(hcd);
 	u8 devctl;
+	int ret;
 
-	musb_port_suspend(musb, true);
+	ret = musb_port_suspend(musb, true);
+	if (ret)
+		return ret;
 
 	if (!is_host_active(musb))
 		return 0;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 72392bbcd0a4..2999845632ce 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -67,7 +67,7 @@ extern void musb_host_rx(struct musb *, u8);
 extern void musb_root_disconnect(struct musb *musb);
 extern void musb_host_resume_root_hub(struct musb *musb);
 extern void musb_host_poke_root_hub(struct musb *musb);
-extern void musb_port_suspend(struct musb *musb, bool do_suspend);
+extern int musb_port_suspend(struct musb *musb, bool do_suspend);
 extern void musb_port_reset(struct musb *musb, bool do_reset);
 extern void musb_host_finish_resume(struct work_struct *work);
 #else
@@ -99,7 +99,10 @@ static inline void musb_root_disconnect(struct musb *musb) {}
 static inline void musb_host_resume_root_hub(struct musb *musb) {}
 static inline void musb_host_poll_rh_status(struct musb *musb) {}
 static inline void musb_host_poke_root_hub(struct musb *musb) {}
-static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
+static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+	return 0;
+}
 static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
 static inline void musb_host_finish_resume(struct work_struct *work) {}
 #endif
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 5165d2b07ade..2f8dd9826e94 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -48,14 +48,14 @@ void musb_host_finish_resume(struct work_struct *work)
 	spin_unlock_irqrestore(&musb->lock, flags);
 }
 
-void musb_port_suspend(struct musb *musb, bool do_suspend)
+int musb_port_suspend(struct musb *musb, bool do_suspend)
 {
 	struct usb_otg *otg = musb->xceiv->otg;
 	u8 power;
 	void __iomem *mbase = musb->mregs;
 
 	if (!is_host_active(musb))
-		return;
+		return 0;
 
 	/* NOTE: this doesn't necessarily put PHY into low power mode,
 	 * turning off its clock; that's a function of PHY integration and
@@ -66,16 +66,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
 	if (do_suspend) {
 		int retries = 10000;
 
-		power &= ~MUSB_POWER_RESUME;
-		power |= MUSB_POWER_SUSPENDM;
-		musb_writeb(mbase, MUSB_POWER, power);
+		if (power & MUSB_POWER_RESUME)
+			return -EBUSY;
 
-		/* Needed for OPT A tests */
-		power = musb_readb(mbase, MUSB_POWER);
-		while (power & MUSB_POWER_SUSPENDM) {
+		if (!(power & MUSB_POWER_SUSPENDM)) {
+			power |= MUSB_POWER_SUSPENDM;
+			musb_writeb(mbase, MUSB_POWER, power);
+
+			/* Needed for OPT A tests */
 			power = musb_readb(mbase, MUSB_POWER);
-			if (retries-- < 1)
-				break;
+			while (power & MUSB_POWER_SUSPENDM) {
+				power = musb_readb(mbase, MUSB_POWER);
+				if (retries-- < 1)
+					break;
+			}
 		}
 
 		musb_dbg(musb, "Root port suspended, power %02x", power);
@@ -111,6 +115,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
 		schedule_delayed_work(&musb->finish_resume_work,
 				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
 	}
+	return 0;
 }
 
 void musb_port_reset(struct musb *musb, bool do_reset)
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 14a72357800a..35618ceb2791 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -73,6 +73,7 @@ struct bus_id_priv {
 	struct stub_device *sdev;
 	struct usb_device *udev;
 	char shutdown_busid;
+	spinlock_t busid_lock;
 };
 
 /* stub_priv is allocated from stub_priv_cache */
@@ -83,6 +84,7 @@ extern struct usb_device_driver stub_driver;
 
 /* stub_main.c */
 struct bus_id_priv *get_busid_priv(const char *busid);
+void put_busid_priv(struct bus_id_priv *bid);
 int del_match_busid(char *busid);
 void stub_device_cleanup_urbs(struct stub_device *sdev);
 
88 90
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index dd8ef36ab10e..c0d6ff1baa72 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -300,9 +300,9 @@ static int stub_probe(struct usb_device *udev)
 	struct stub_device *sdev = NULL;
 	const char *udev_busid = dev_name(&udev->dev);
 	struct bus_id_priv *busid_priv;
-	int rc;
+	int rc = 0;
 
-	dev_dbg(&udev->dev, "Enter\n");
+	dev_dbg(&udev->dev, "Enter probe\n");
 
 	/* check we should claim or not by busid_table */
 	busid_priv = get_busid_priv(udev_busid);
@@ -317,13 +317,15 @@ static int stub_probe(struct usb_device *udev)
 		 * other matched drivers by the driver core.
 		 * See driver_probe_device() in driver/base/dd.c
 		 */
-		return -ENODEV;
+		rc = -ENODEV;
+		goto call_put_busid_priv;
 	}
 
 	if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
 		dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
 			udev_busid);
-		return -ENODEV;
+		rc = -ENODEV;
+		goto call_put_busid_priv;
 	}
 
 	if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -331,13 +333,16 @@ static int stub_probe(struct usb_device *udev)
 			"%s is attached on vhci_hcd... skip!\n",
 			udev_busid);
 
-		return -ENODEV;
+		rc = -ENODEV;
+		goto call_put_busid_priv;
 	}
 
 	/* ok, this is my device */
 	sdev = stub_device_alloc(udev);
-	if (!sdev)
-		return -ENOMEM;
+	if (!sdev) {
+		rc = -ENOMEM;
+		goto call_put_busid_priv;
+	}
 
 	dev_info(&udev->dev,
 		"usbip-host: register new device (bus %u dev %u)\n",
@@ -369,7 +374,9 @@ static int stub_probe(struct usb_device *udev)
 	}
 	busid_priv->status = STUB_BUSID_ALLOC;
 
-	return 0;
+	rc = 0;
+	goto call_put_busid_priv;
+
 err_files:
 	usb_hub_release_port(udev->parent, udev->portnum,
 			     (struct usb_dev_state *) udev);
@@ -379,6 +386,9 @@ err_port:
 
 	busid_priv->sdev = NULL;
 	stub_device_free(sdev);
+
+call_put_busid_priv:
+	put_busid_priv(busid_priv);
 	return rc;
 }
 
@@ -404,7 +414,7 @@ static void stub_disconnect(struct usb_device *udev)
 	struct bus_id_priv *busid_priv;
 	int rc;
 
-	dev_dbg(&udev->dev, "Enter\n");
+	dev_dbg(&udev->dev, "Enter disconnect\n");
 
 	busid_priv = get_busid_priv(udev_busid);
 	if (!busid_priv) {
@@ -417,7 +427,7 @@ static void stub_disconnect(struct usb_device *udev)
 	/* get stub_device */
 	if (!sdev) {
 		dev_err(&udev->dev, "could not get device");
-		return;
+		goto call_put_busid_priv;
 	}
 
 	dev_set_drvdata(&udev->dev, NULL);
@@ -432,12 +442,12 @@ static void stub_disconnect(struct usb_device *udev)
 			  (struct usb_dev_state *) udev);
 	if (rc) {
 		dev_dbg(&udev->dev, "unable to release port\n");
-		return;
+		goto call_put_busid_priv;
 	}
 
 	/* If usb reset is called from event handler */
 	if (usbip_in_eh(current))
-		return;
+		goto call_put_busid_priv;
 
 	/* shutdown the current connection */
 	shutdown_busid(busid_priv);
@@ -448,12 +458,11 @@ static void stub_disconnect(struct usb_device *udev)
 	busid_priv->sdev = NULL;
 	stub_device_free(sdev);
 
-	if (busid_priv->status == STUB_BUSID_ALLOC) {
+	if (busid_priv->status == STUB_BUSID_ALLOC)
 		busid_priv->status = STUB_BUSID_ADDED;
-	} else {
-		busid_priv->status = STUB_BUSID_OTHER;
-		del_match_busid((char *)udev_busid);
-	}
+
+call_put_busid_priv:
+	put_busid_priv(busid_priv);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index d41d0cdeec0f..bf8a5feb0ee9 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -14,6 +14,7 @@
 #define DRIVER_DESC "USB/IP Host Driver"
 
 struct kmem_cache *stub_priv_cache;
+
 /*
  * busid_tables defines matching busids that usbip can grab. A user can change
  * dynamically what device is locally used and what device is exported to a
@@ -25,6 +26,8 @@ static spinlock_t busid_table_lock;
 
 static void init_busid_table(void)
 {
+	int i;
+
 	/*
 	 * This also sets the bus_table[i].status to
 	 * STUB_BUSID_OTHER, which is 0.
@@ -32,6 +35,9 @@ static void init_busid_table(void)
 	memset(busid_table, 0, sizeof(busid_table));
 
 	spin_lock_init(&busid_table_lock);
+
+	for (i = 0; i < MAX_BUSID; i++)
+		spin_lock_init(&busid_table[i].busid_lock);
 }
 
 /*
@@ -43,15 +49,20 @@ static int get_busid_idx(const char *busid)
 	int i;
 	int idx = -1;
 
-	for (i = 0; i < MAX_BUSID; i++)
+	for (i = 0; i < MAX_BUSID; i++) {
+		spin_lock(&busid_table[i].busid_lock);
 		if (busid_table[i].name[0])
 			if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
 				idx = i;
+				spin_unlock(&busid_table[i].busid_lock);
 				break;
 			}
+		spin_unlock(&busid_table[i].busid_lock);
+	}
 	return idx;
 }
 
+/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
 struct bus_id_priv *get_busid_priv(const char *busid)
 {
 	int idx;
@@ -59,13 +70,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
 
 	spin_lock(&busid_table_lock);
 	idx = get_busid_idx(busid);
-	if (idx >= 0)
+	if (idx >= 0) {
 		bid = &(busid_table[idx]);
+		/* get busid_lock before returning */
+		spin_lock(&bid->busid_lock);
+	}
 	spin_unlock(&busid_table_lock);
 
 	return bid;
 }
 
+void put_busid_priv(struct bus_id_priv *bid)
+{
+	if (bid)
+		spin_unlock(&bid->busid_lock);
+}
+
 static int add_match_busid(char *busid)
 {
 	int i;
@@ -78,15 +98,19 @@ static int add_match_busid(char *busid)
 		goto out;
 	}
 
-	for (i = 0; i < MAX_BUSID; i++)
+	for (i = 0; i < MAX_BUSID; i++) {
+		spin_lock(&busid_table[i].busid_lock);
 		if (!busid_table[i].name[0]) {
 			strlcpy(busid_table[i].name, busid, BUSID_SIZE);
 			if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
 			    (busid_table[i].status != STUB_BUSID_REMOV))
 				busid_table[i].status = STUB_BUSID_ADDED;
 			ret = 0;
+			spin_unlock(&busid_table[i].busid_lock);
 			break;
 		}
+		spin_unlock(&busid_table[i].busid_lock);
+	}
 
 out:
 	spin_unlock(&busid_table_lock);
@@ -107,6 +131,8 @@ int del_match_busid(char *busid)
 	/* found */
 	ret = 0;
 
+	spin_lock(&busid_table[idx].busid_lock);
+
 	if (busid_table[idx].status == STUB_BUSID_OTHER)
 		memset(busid_table[idx].name, 0, BUSID_SIZE);
 
@@ -114,6 +140,7 @@ int del_match_busid(char *busid)
 	    (busid_table[idx].status != STUB_BUSID_ADDED))
 		busid_table[idx].status = STUB_BUSID_REMOV;
 
+	spin_unlock(&busid_table[idx].busid_lock);
 out:
 	spin_unlock(&busid_table_lock);
 
@@ -126,9 +153,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf)
 	char *out = buf;
 
 	spin_lock(&busid_table_lock);
-	for (i = 0; i < MAX_BUSID; i++)
+	for (i = 0; i < MAX_BUSID; i++) {
+		spin_lock(&busid_table[i].busid_lock);
 		if (busid_table[i].name[0])
 			out += sprintf(out, "%s ", busid_table[i].name);
+		spin_unlock(&busid_table[i].busid_lock);
+	}
 	spin_unlock(&busid_table_lock);
 	out += sprintf(out, "\n");
 
@@ -169,6 +199,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
 }
 static DRIVER_ATTR_RW(match_busid);
 
+static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
+{
+	int ret;
+
+	/* device_attach() callers should hold parent lock for USB */
+	if (busid_priv->udev->dev.parent)
+		device_lock(busid_priv->udev->dev.parent);
+	ret = device_attach(&busid_priv->udev->dev);
+	if (busid_priv->udev->dev.parent)
+		device_unlock(busid_priv->udev->dev.parent);
+	if (ret < 0) {
+		dev_err(&busid_priv->udev->dev, "rebind failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+static void stub_device_rebind(void)
+{
+#if IS_MODULE(CONFIG_USBIP_HOST)
+	struct bus_id_priv *busid_priv;
+	int i;
+
+	/* update status to STUB_BUSID_OTHER so probe ignores the device */
+	spin_lock(&busid_table_lock);
+	for (i = 0; i < MAX_BUSID; i++) {
+		if (busid_table[i].name[0] &&
+		    busid_table[i].shutdown_busid) {
+			busid_priv = &(busid_table[i]);
+			busid_priv->status = STUB_BUSID_OTHER;
+		}
+	}
+	spin_unlock(&busid_table_lock);
+
+	/* now run rebind - no need to hold locks. driver files are removed */
+	for (i = 0; i < MAX_BUSID; i++) {
+		if (busid_table[i].name[0] &&
+		    busid_table[i].shutdown_busid) {
+			busid_priv = &(busid_table[i]);
+			do_rebind(busid_table[i].name, busid_priv);
+		}
+	}
+#endif
+}
+
 static ssize_t rebind_store(struct device_driver *dev, const char *buf,
 			    size_t count)
 {
@@ -186,16 +261,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
 	if (!bid)
 		return -ENODEV;
 
-	/* device_attach() callers should hold parent lock for USB */
-	if (bid->udev->dev.parent)
-		device_lock(bid->udev->dev.parent);
-	ret = device_attach(&bid->udev->dev);
-	if (bid->udev->dev.parent)
-		device_unlock(bid->udev->dev.parent);
-	if (ret < 0) {
-		dev_err(&bid->udev->dev, "rebind failed\n");
+	/* mark the device for deletion so probe ignores it during rescan */
+	bid->status = STUB_BUSID_OTHER;
+	/* release the busid lock */
+	put_busid_priv(bid);
+
+	ret = do_rebind((char *) buf, bid);
+	if (ret < 0)
 		return ret;
-	}
+
+	/* delete device from busid_table */
+	del_match_busid((char *) buf);
 
 	return count;
 }
@@ -317,6 +393,9 @@ static void __exit usbip_host_exit(void)
 	 */
 	usb_deregister_device_driver(&stub_driver);
 
+	/* initiate scan to attach devices */
+	stub_device_rebind();
+
 	kmem_cache_destroy(stub_priv_cache);
 }
 
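The usbip-host changes introduce two-level locking for the busid table: busid_table_lock covers the lookup, each entry gains its own busid_lock, and get_busid_priv() now returns with the entry lock held, to be paired with the new put_busid_priv(). A pthreads sketch of that get/put discipline (struct entry and the function names are illustrative; build with -lpthread):

#include <pthread.h>
#include <string.h>
#include <stdio.h>

#define MAX_ENTRY 4

struct entry {
    char name[32];
    pthread_mutex_t lock;       /* protects this entry's fields */
};

static struct entry table[MAX_ENTRY];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns with the entry lock held; pair with put_entry(). */
static struct entry *get_entry(const char *name) {
    struct entry *e = NULL;
    pthread_mutex_lock(&table_lock);
    for (int i = 0; i < MAX_ENTRY; i++)
        if (!strcmp(table[i].name, name)) {
            e = &table[i];
            pthread_mutex_lock(&e->lock);  /* taken before table_lock drops */
            break;
        }
    pthread_mutex_unlock(&table_lock);
    return e;
}

static void put_entry(struct entry *e) {
    if (e)
        pthread_mutex_unlock(&e->lock);
}

int main(void) {
    for (int i = 0; i < MAX_ENTRY; i++)
        pthread_mutex_init(&table[i].lock, NULL);
    strcpy(table[1].name, "1-1.2");
    struct entry *e = get_entry("1-1.2");
    printf("found: %s\n", e ? e->name : "none");
    put_entry(e);
    return 0;
}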
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f3bd8e941224..f0be5f35ab28 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 {
 	int ret = 0;
 
+	mutex_lock(&dev->mutex);
 	vhost_dev_lock_vqs(dev);
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
@@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
 	}
 
 	vhost_dev_unlock_vqs(dev);
+	mutex_unlock(&dev->mutex);
+
 	return ret;
 }
 ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index e1c60899fdbc..a6f9ba85dc4b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -351,7 +351,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	 * physical address */
 	phys = xen_bus_to_phys(dev_addr);
 
-	if (((dev_addr + size - 1 > dma_mask)) ||
+	if (((dev_addr + size - 1 <= dma_mask)) ||
 	    range_straddles_page_boundary(phys, size))
 		xen_destroy_contiguous_region(phys, order);
 