Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpi_lpss.c | 18
-rw-r--r-- drivers/acpi/acpica/hwsleep.c | 15
-rw-r--r-- drivers/acpi/acpica/uterror.c | 6
-rw-r--r-- drivers/acpi/battery.c | 9
-rw-r--r-- drivers/acpi/ec.c | 20
-rw-r--r-- drivers/acpi/nfit/core.c | 48
-rw-r--r-- drivers/acpi/nfit/nfit.h | 1
-rw-r--r-- drivers/acpi/osl.c | 72
-rw-r--r-- drivers/acpi/pptt.c | 10
-rw-r--r-- drivers/ata/Kconfig | 2
-rw-r--r-- drivers/ata/ahci.c | 60
-rw-r--r-- drivers/ata/ahci_mvebu.c | 2
-rw-r--r-- drivers/ata/libahci.c | 7
-rw-r--r-- drivers/ata/libata-core.c | 3
-rw-r--r-- drivers/ata/libata-eh.c | 41
-rw-r--r-- drivers/ata/libata-scsi.c | 18
-rw-r--r-- drivers/ata/sata_fsl.c | 9
-rw-r--r-- drivers/ata/sata_nv.c | 3
-rw-r--r-- drivers/atm/iphase.c | 2
-rw-r--r-- drivers/atm/zatm.c | 2
-rw-r--r-- drivers/base/Makefile | 3
-rw-r--r-- drivers/base/core.c | 15
-rw-r--r-- drivers/base/dma-coherent.c | 434
-rw-r--r-- drivers/base/dma-contiguous.c | 278
-rw-r--r-- drivers/base/dma-mapping.c | 345
-rw-r--r-- drivers/base/power/domain.c | 23
-rw-r--r-- drivers/block/drbd/drbd_req.c | 4
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 2
-rw-r--r-- drivers/block/loop.c | 1
-rw-r--r-- drivers/block/nbd.c | 42
-rw-r--r-- drivers/block/null_blk.c | 2
-rw-r--r-- drivers/bluetooth/hci_nokia.c | 2
-rw-r--r-- drivers/bus/ti-sysc.c | 8
-rw-r--r-- drivers/char/hw_random/core.c | 11
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 6
-rw-r--r-- drivers/char/ipmi/kcs_bmc.c | 31
-rw-r--r-- drivers/char/random.c | 29
-rw-r--r-- drivers/clk/Makefile | 2
-rw-r--r-- drivers/clk/davinci/da8xx-cfgchip.c | 2
-rw-r--r-- drivers/clk/davinci/psc.h | 2
-rw-r--r-- drivers/clk/sunxi-ng/Makefile | 39
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 2
-rw-r--r-- drivers/clocksource/timer-stm32.c | 4
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 27
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-kryo.c | 33
-rw-r--r-- drivers/crypto/chelsio/chtls/chtls_io.c | 5
-rw-r--r-- drivers/dax/device.c | 12
-rw-r--r-- drivers/dax/super.c | 8
-rw-r--r-- drivers/dma/k3dma.c | 2
-rw-r--r-- drivers/dma/pl330.c | 2
-rw-r--r-- drivers/dma/ti/omap-dma.c | 6
-rw-r--r-- drivers/firmware/dmi-id.c | 2
-rw-r--r-- drivers/firmware/dmi_scan.c | 1
-rw-r--r-- drivers/firmware/efi/libstub/tpm.c | 2
-rw-r--r-- drivers/fpga/altera-cvp.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 46
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 131
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 39
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 65
-rw-r--r-- drivers/gpu/drm/amd/include/atomfirmware.h | 5
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 96
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 5
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 5
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c | 3
-rw-r--r-- drivers/gpu/drm/arm/malidp_hw.c | 3
-rw-r--r-- drivers/gpu/drm/arm/malidp_planes.c | 9
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 2
-rw-r--r-- drivers/gpu/drm/bridge/sil-sii8620.c | 365
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 14
-rw-r--r-- drivers/gpu/drm/drm_property.c | 6
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 24
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 3
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 24
-rw-r--r-- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 17
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 10
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 51
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 110
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_plane.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_scaler.c | 44
-rw-r--r-- drivers/gpu/drm/exynos/regs-gsc.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 58
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 24
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 85
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 49
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 20
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 34
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 12
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.c | 13
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 7
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_tcon.c | 25
-rw-r--r-- drivers/gpu/drm/udl/udl_fb.c | 5
-rw-r--r-- drivers/gpu/drm/udl/udl_transfer.c | 11
-rw-r--r-- drivers/hid/hid-core.c | 5
-rw-r--r-- drivers/hid/hid-debug.c | 8
-rw-r--r-- drivers/hid/hid-google-hammer.c | 2
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-steam.c | 10
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid.c | 2
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/pci-ish.c | 22
-rw-r--r-- drivers/hid/usbhid/hiddev.c | 11
-rw-r--r-- drivers/hid/wacom_sys.c | 8
-rw-r--r-- drivers/hid/wacom_wac.c | 10
-rw-r--r-- drivers/hwmon/dell-smm-hwmon.c | 7
-rw-r--r-- drivers/hwmon/nct6775.c | 2
-rw-r--r-- drivers/i2c/algos/i2c-algo-bit.c | 8
-rw-r--r-- drivers/i2c/busses/i2c-cht-wc.c | 3
-rw-r--r-- drivers/i2c/busses/i2c-gpio.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-stu300.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-tegra.c | 17
-rw-r--r-- drivers/i2c/i2c-core-base.c | 11
-rw-r--r-- drivers/i2c/i2c-core-smbus.c | 14
-rw-r--r-- drivers/iio/accel/mma8452.c | 2
-rw-r--r-- drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 2
-rw-r--r-- drivers/iio/light/tsl2772.c | 2
-rw-r--r-- drivers/iio/pressure/bmp280-core.c | 5
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 28
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 14
-rw-r--r-- drivers/infiniband/core/verbs.c | 14
-rw-r--r-- drivers/infiniband/hw/cxgb4/mem.c | 2
-rw-r--r-- drivers/infiniband/hw/hfi1/rc.c | 2
-rw-r--r-- drivers/infiniband/hw/hfi1/uc.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/ud.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/verbs_txreq.c | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/verbs_txreq.h | 4
-rw-r--r-- drivers/infiniband/hw/mlx4/mr.c | 7
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 38
-rw-r--r-- drivers/infiniband/hw/mlx5/srq.c | 18
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 3
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_req.c | 5
-rw-r--r-- drivers/input/input-mt.c | 12
-rw-r--r-- drivers/input/joystick/xpad.c | 2
-rw-r--r-- drivers/input/keyboard/goldfish_events.c | 9
-rw-r--r-- drivers/input/misc/Kconfig | 10
-rw-r--r-- drivers/input/misc/Makefile | 1
-rw-r--r-- drivers/input/misc/sc27xx-vibra.c | 154
-rw-r--r-- drivers/input/mouse/elan_i2c.h | 2
-rw-r--r-- drivers/input/mouse/elan_i2c_core.c | 3
-rw-r--r-- drivers/input/mouse/elan_i2c_smbus.c | 10
-rw-r--r-- drivers/input/mouse/elantech.c | 11
-rw-r--r-- drivers/input/mouse/psmouse-base.c | 12
-rw-r--r-- drivers/input/rmi4/Kconfig | 1
-rw-r--r-- drivers/input/rmi4/rmi_2d_sensor.c | 34
-rw-r--r-- drivers/input/rmi4/rmi_bus.c | 50
-rw-r--r-- drivers/input/rmi4/rmi_bus.h | 10
-rw-r--r-- drivers/input/rmi4/rmi_driver.c | 52
-rw-r--r-- drivers/input/rmi4/rmi_f01.c | 10
-rw-r--r-- drivers/input/rmi4/rmi_f03.c | 9
-rw-r--r-- drivers/input/rmi4/rmi_f11.c | 42
-rw-r--r-- drivers/input/rmi4/rmi_f12.c | 8
-rw-r--r-- drivers/input/rmi4/rmi_f30.c | 9
-rw-r--r-- drivers/input/rmi4/rmi_f34.c | 5
-rw-r--r-- drivers/input/rmi4/rmi_f54.c | 6
-rw-r--r-- drivers/input/touchscreen/silead.c | 1
-rw-r--r-- drivers/iommu/Kconfig | 1
-rw-r--r-- drivers/iommu/intel-iommu.c | 62
-rw-r--r-- drivers/irqchip/irq-gic-v2m.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 62
-rw-r--r-- drivers/irqchip/irq-ls-scfg-msi.c | 10
-rw-r--r-- drivers/isdn/mISDN/socket.c | 2
-rw-r--r-- drivers/lightnvm/Kconfig | 2
-rw-r--r-- drivers/md/dm-raid.c | 2
-rw-r--r-- drivers/md/dm-table.c | 7
-rw-r--r-- drivers/md/dm-thin-metadata.c | 9
-rw-r--r-- drivers/md/dm-thin.c | 11
-rw-r--r-- drivers/md/dm-writecache.c | 10
-rw-r--r-- drivers/md/dm-zoned-target.c | 2
-rw-r--r-- drivers/md/dm.c | 8
-rw-r--r-- drivers/md/md.c | 8
-rw-r--r-- drivers/md/raid10.c | 7
-rw-r--r-- drivers/media/rc/bpf-lirc.c | 14
-rw-r--r-- drivers/misc/ibmasm/ibmasmfs.c | 27
-rw-r--r-- drivers/misc/mei/interrupt.c | 5
-rw-r--r-- drivers/misc/vmw_balloon.c | 4
-rw-r--r-- drivers/mmc/core/slot-gpio.c | 2
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 7
-rw-r--r-- drivers/mmc/host/renesas_sdhi_internal_dmac.c | 15
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 21
-rw-r--r-- drivers/mmc/host/sunxi-mmc.c | 7
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0002.c | 19
-rw-r--r-- drivers/mtd/devices/mtd_dataflash.c | 4
-rw-r--r-- drivers/mtd/nand/raw/denali_dt.c | 6
-rw-r--r-- drivers/mtd/nand/raw/mxc_nand.c | 5
-rw-r--r-- drivers/mtd/nand/raw/nand_base.c | 2
-rw-r--r-- drivers/mtd/nand/raw/nand_macronix.c | 48
-rw-r--r-- drivers/mtd/nand/raw/nand_micron.c | 2
-rw-r--r-- drivers/mtd/spi-nor/cadence-quadspi.c | 6
-rw-r--r-- drivers/net/ethernet/amd/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/arc/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 2
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 2
-rw-r--r-- drivers/net/ethernet/cadence/macb_ptp.c | 5
-rw-r--r-- drivers/net/ethernet/calxeda/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_clsf.c | 3
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 12
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 15
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_port.c | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_rx.c | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 31
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24
-rw-r--r-- drivers/net/ethernet/marvell/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 11
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/main.c | 9
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/match.c | 14
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 11
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.c | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 19
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 11
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 19
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ptp.c | 10
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/farch.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 18
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 12
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 12
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 22
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 19
-rw-r--r-- drivers/net/geneve.c | 2
-rw-r--r-- drivers/net/hamradio/bpqether.c | 8
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 2
-rw-r--r-- drivers/net/hyperv/netvsc.c | 37
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 17
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 61
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 40
-rw-r--r-- drivers/net/net_failover.c | 2
-rw-r--r-- drivers/net/phy/dp83tc811.c | 2
-rw-r--r-- drivers/net/ppp/pppoe.c | 2
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 3
-rw-r--r-- drivers/net/usb/lan78xx.c | 37
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 1
-rw-r--r-- drivers/net/usb/r8152.c | 3
-rw-r--r-- drivers/net/virtio_net.c | 30
-rw-r--r-- drivers/net/vxlan.c | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/Kconfig | 1
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/Kconfig | 2
-rw-r--r-- drivers/net/xen-netfront.c | 11
-rw-r--r-- drivers/nfc/pn533/usb.c | 4
-rw-r--r-- drivers/nvdimm/claim.c | 1
-rw-r--r-- drivers/nvdimm/pmem.c | 3
-rw-r--r-- drivers/nvme/host/core.c | 1
-rw-r--r-- drivers/nvme/host/fc.c | 6
-rw-r--r-- drivers/nvme/host/nvme.h | 1
-rw-r--r-- drivers/nvme/host/pci.c | 44
-rw-r--r-- drivers/nvme/host/rdma.c | 76
-rw-r--r-- drivers/nvme/target/core.c | 8
-rw-r--r-- drivers/nvmem/core.c | 4
-rw-r--r-- drivers/opp/core.c | 2
-rw-r--r-- drivers/pci/Makefile | 6
-rw-r--r-- drivers/pci/controller/Kconfig | 3
-rw-r--r-- drivers/pci/controller/dwc/Kconfig | 1
-rw-r--r-- drivers/pci/controller/pci-ftpci100.c | 2
-rw-r--r-- drivers/pci/controller/pcie-rcar.c | 16
-rw-r--r-- drivers/pci/controller/pcie-xilinx-nwl.c | 2
-rw-r--r-- drivers/pci/controller/pcie-xilinx.c | 1
-rw-r--r-- drivers/pci/endpoint/pci-epf-core.c | 4
-rw-r--r-- drivers/pci/hotplug/acpi_pcihp.c | 10
-rw-r--r-- drivers/pci/iov.c | 16
-rw-r--r-- drivers/pci/pci-acpi.c | 12
-rw-r--r-- drivers/pci/pci-driver.c | 1
-rw-r--r-- drivers/pci/pci.h | 4
-rw-r--r-- drivers/perf/xgene_pmu.c | 2
-rw-r--r-- drivers/pinctrl/actions/pinctrl-owl.c | 2
-rw-r--r-- drivers/pinctrl/devicetree.c | 7
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mt7622.c | 6
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 5
-rw-r--r-- drivers/pinctrl/pinctrl-single.c | 14
-rw-r--r-- drivers/ptp/ptp_chardev.c | 4
-rw-r--r-- drivers/ptp/ptp_qoriq.c | 2
-rw-r--r-- drivers/s390/block/dasd.c | 184
-rw-r--r-- drivers/s390/block/dasd_alias.c | 6
-rw-r--r-- drivers/s390/block/dasd_diag.c | 3
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 46
-rw-r--r-- drivers/s390/block/dasd_eer.c | 10
-rw-r--r-- drivers/s390/block/dasd_fba.c | 6
-rw-r--r-- drivers/s390/block/dasd_int.h | 34
-rw-r--r-- drivers/s390/cio/Makefile | 1
-rw-r--r-- drivers/s390/cio/vfio_ccw_cp.c | 140
-rw-r--r-- drivers/s390/cio/vfio_ccw_drv.c | 5
-rw-r--r-- drivers/s390/cio/vfio_ccw_fsm.c | 17
-rw-r--r-- drivers/s390/cio/vfio_ccw_trace.h | 54
-rw-r--r-- drivers/s390/net/qeth_core.h | 13
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 47
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 24
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 3
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 15
-rw-r--r-- drivers/scsi/ipr.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 7
-rw-r--r-- drivers/scsi/scsi_debug.c | 2
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r-- drivers/scsi/sg.c | 42
-rw-r--r-- drivers/scsi/xen-scsifront.c | 33
-rw-r--r-- drivers/soc/imx/gpcv2.c | 13
-rw-r--r-- drivers/soc/qcom/Kconfig | 3
-rw-r--r-- drivers/soc/renesas/rcar-sysc.c | 35
-rw-r--r-- drivers/staging/android/ion/ion_heap.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/quatech_daqp_cs.c | 2
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_ap.c | 2
-rw-r--r-- drivers/staging/rtlwifi/rtl8822be/hw.c | 2
-rw-r--r-- drivers/staging/rtlwifi/wifi.h | 1
-rw-r--r-- drivers/staging/typec/Kconfig | 1
-rw-r--r-- drivers/target/target_core_pr.c | 15
-rw-r--r-- drivers/target/target_core_user.c | 44
-rw-r--r-- drivers/thunderbolt/domain.c | 4
-rw-r--r-- drivers/tty/n_tty.c | 55
-rw-r--r-- drivers/tty/serdev/core.c | 1
-rw-r--r-- drivers/tty/serial/8250/8250_pci.c | 2
-rw-r--r-- drivers/tty/vt/vt.c | 4
-rw-r--r-- drivers/uio/uio.c | 139
-rw-r--r-- drivers/usb/chipidea/host.c | 5
-rw-r--r-- drivers/usb/class/cdc-acm.c | 3
-rw-r--r-- drivers/usb/core/quirks.c | 4
-rw-r--r-- drivers/usb/dwc2/core.h | 3
-rw-r--r-- drivers/usb/dwc2/gadget.c | 20
-rw-r--r-- drivers/usb/dwc2/hcd.c | 93
-rw-r--r-- drivers/usb/dwc2/hcd.h | 8
-rw-r--r-- drivers/usb/dwc2/hcd_intr.c | 11
-rw-r--r-- drivers/usb/dwc2/hcd_queue.c | 5
-rw-r--r-- drivers/usb/dwc3/core.c | 23
-rw-r--r-- drivers/usb/dwc3/dwc3-of-simple.c | 3
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 2
-rw-r--r-- drivers/usb/dwc3/dwc3-qcom.c | 13
-rw-r--r-- drivers/usb/gadget/composite.c | 3
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 26
-rw-r--r-- drivers/usb/gadget/udc/aspeed-vhub/Kconfig | 1
-rw-r--r-- drivers/usb/host/xhci-dbgcap.c | 12
-rw-r--r-- drivers/usb/host/xhci-mem.c | 6
-rw-r--r-- drivers/usb/host/xhci-tegra.c | 6
-rw-r--r-- drivers/usb/host/xhci-trace.h | 36
-rw-r--r-- drivers/usb/host/xhci.c | 47
-rw-r--r-- drivers/usb/host/xhci.h | 4
-rw-r--r-- drivers/usb/misc/yurex.c | 23
-rw-r--r-- drivers/usb/serial/ch341.c | 2
-rw-r--r-- drivers/usb/serial/cp210x.c | 15
-rw-r--r-- drivers/usb/serial/keyspan_pda.c | 4
-rw-r--r-- drivers/usb/serial/mos7840.c | 3
-rw-r--r-- drivers/usb/typec/tcpm.c | 15
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.c | 13
-rw-r--r-- drivers/usb/typec/ucsi/ucsi_acpi.c | 5
-rw-r--r-- drivers/vfio/pci/Kconfig | 12
-rw-r--r-- drivers/vfio/vfio_iommu_type1.c | 16
-rw-r--r-- drivers/vhost/net.c | 3
-rw-r--r-- drivers/xen/Makefile | 2
-rw-r--r-- drivers/xen/events/events_base.c | 2
-rw-r--r-- drivers/xen/grant-table.c | 4
-rw-r--r-- drivers/xen/manage.c | 18
-rw-r--r-- drivers/xen/privcmd-buf.c | 210
-rw-r--r-- drivers/xen/privcmd.c | 9
-rw-r--r-- drivers/xen/privcmd.h | 3
-rw-r--r-- drivers/xen/xen-scsiback.c | 16
416 files changed, 4236 insertions(+), 3081 deletions(-)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 38a286975c31..f8fecfec5df9 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -22,6 +22,7 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/pwm.h>
+#include <linux/suspend.h>
 #include <linux/delay.h>
 
 #include "internal.h"
@@ -946,9 +947,10 @@ static void lpss_iosf_exit_d3_state(void)
 	mutex_unlock(&lpss_iosf_mutex);
 }
 
-static int acpi_lpss_suspend(struct device *dev, bool wakeup)
+static int acpi_lpss_suspend(struct device *dev, bool runtime)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+	bool wakeup = runtime || device_may_wakeup(dev);
 	int ret;
 
 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -961,13 +963,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup)
 	 * wrong status for devices being about to be powered off. See
 	 * lpss_iosf_enter_d3_state() for further information.
 	 */
-	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+	if ((runtime || !pm_suspend_via_firmware()) &&
+	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_enter_d3_state();
 
 	return ret;
 }
 
-static int acpi_lpss_resume(struct device *dev)
+static int acpi_lpss_resume(struct device *dev, bool runtime)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;
@@ -976,7 +979,8 @@ static int acpi_lpss_resume(struct device *dev)
 	 * This call is kept first to be in symmetry with
 	 * acpi_lpss_runtime_suspend() one.
 	 */
-	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+	if ((runtime || !pm_resume_via_firmware()) &&
+	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_exit_d3_state();
 
 	ret = acpi_dev_resume(dev);
@@ -1000,12 +1004,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
 		return 0;
 
 	ret = pm_generic_suspend_late(dev);
-	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+	return ret ? ret : acpi_lpss_suspend(dev, false);
 }
 
 static int acpi_lpss_resume_early(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev);
+	int ret = acpi_lpss_resume(dev, false);
 
 	return ret ? ret : pm_generic_resume_early(dev);
 }
@@ -1020,7 +1024,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 
 static int acpi_lpss_runtime_resume(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev);
+	int ret = acpi_lpss_resume(dev, true);
 
 	return ret ? ret : pm_generic_runtime_resume(dev);
 }
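The hunks above make the LPSS quirks conditional on how the transition was started: they are applied only for runtime PM or suspend paths that do not go through platform firmware. As a minimal sketch of the same pattern in a driver callback (the foo_* helpers are hypothetical; pm_suspend_via_firmware() and device_may_wakeup() are the real kernel APIs):

	#include <linux/pm_wakeup.h>
	#include <linux/suspend.h>

	static int foo_suspend(struct device *dev, bool runtime)
	{
		/* Runtime suspend always arms wakeup; system suspend asks the PM core. */
		bool wakeup = runtime || device_may_wakeup(dev);

		/*
		 * Only apply power-off quirks when firmware is not in the loop,
		 * i.e. for runtime suspend or suspend-to-idle.
		 */
		if (runtime || !pm_suspend_via_firmware())
			foo_enter_d3_quirk(dev);	/* hypothetical */

		return foo_power_down(dev, wakeup);	/* hypothetical */
	}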
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index fc0c2e2328cd..fe9d46d81750 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
 		return_ACPI_STATUS(status);
 	}
 
-	/*
-	 * 1) Disable all GPEs
-	 * 2) Enable all wakeup GPEs
-	 */
+	/* Disable all GPEs */
 	status = acpi_hw_disable_all_gpes();
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 	}
+	/*
+	 * If the target sleep state is S5, clear all GPEs and fixed events too
+	 */
+	if (sleep_state == ACPI_STATE_S5) {
+		status = acpi_hw_clear_acpi_status();
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
 	acpi_gbl_system_awake_and_running = FALSE;
 
+	/* Enable all wakeup GPEs */
 	status = acpi_hw_enable_all_wakeup_gpes();
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 5a64ddaed8a3..e47430272692 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
 	switch (lookup_status) {
 	case AE_ALREADY_EXISTS:
 
-		acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+		acpi_os_printf(ACPI_MSG_BIOS_ERROR);
 		message = "Failure creating";
 		break;
 
 	case AE_NOT_FOUND:
 
-		acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+		acpi_os_printf(ACPI_MSG_BIOS_ERROR);
 		message = "Could not resolve";
 		break;
 
 	default:
 
-		acpi_os_printf("\n" ACPI_MSG_ERROR);
+		acpi_os_printf(ACPI_MSG_ERROR);
 		message = "Failure resolving";
 		break;
 	}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0113a5802a3..d79ad844c78f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
 			 */
 			pr_err("extension failed to load: %s", hook->name);
 			__battery_hook_unregister(hook, 0);
-			return;
+			goto end;
 		}
 	}
 	pr_info("new extension: %s\n", hook->name);
+end:
 	mutex_unlock(&hook_mutex);
 }
 EXPORT_SYMBOL_GPL(battery_hook_register);
@@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
 */
 static void battery_hook_add_battery(struct acpi_battery *battery)
 {
-	struct acpi_battery_hook *hook_node;
+	struct acpi_battery_hook *hook_node, *tmp;
 
 	mutex_lock(&hook_mutex);
 	INIT_LIST_HEAD(&battery->list);
@@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
 	 * when a battery gets hotplugged or initialized
 	 * during the battery module initialization.
 	 */
-	list_for_each_entry(hook_node, &battery_hook_list, list) {
+	list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
 		if (hook_node->add_battery(battery->bat)) {
 			/*
 			 * The notification of the extensions has failed, to
 			 * prevent further errors we will unload the extension.
 			 */
-			__battery_hook_unregister(hook_node, 0);
 			pr_err("error in extension, unloading: %s",
 					hook_node->name);
+			__battery_hook_unregister(hook_node, 0);
 		}
 	}
 	mutex_unlock(&hook_mutex);
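The switch to list_for_each_entry_safe() matters because __battery_hook_unregister() unlinks the current node mid-walk. A minimal, self-contained sketch of why the _safe variant is required (the hook type here is hypothetical):

	#include <linux/list.h>

	struct hook {
		struct list_head list;
		int (*add)(void);
	};

	static void run_hooks(struct list_head *hooks)
	{
		struct hook *h, *tmp;

		/* 'tmp' caches the next entry, so 'h' may be unlinked safely. */
		list_for_each_entry_safe(h, tmp, hooks, list) {
			if (h->add())
				list_del(&h->list);
		}
	}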
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index bb94cf0731fe..442a9e24f439 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2037,6 +2037,17 @@ static inline void acpi_ec_query_exit(void)
 	}
 }
 
+static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+	{
+		.ident = "Thinkpad X1 Carbon 6th",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"),
+		},
+	},
+	{ },
+};
+
 int __init acpi_ec_init(void)
 {
 	int result;
@@ -2047,6 +2058,15 @@ int __init acpi_ec_init(void)
 	if (result)
 		return result;
 
+	/*
+	 * Disable EC wakeup on following systems to prevent periodic
+	 * wakeup from EC GPE.
+	 */
+	if (dmi_check_system(acpi_ec_no_wakeup)) {
+		ec_no_wakeup = true;
+		pr_debug("Disabling EC wakeup on suspend-to-idle\n");
+	}
+
 	/* Drivers must be started after acpi_ec_query_init() */
 	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
 	/*
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index d15814e1727f..7c479002e798 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	const guid_t *guid;
 	int rc, i;
 
+	if (cmd_rc)
+		*cmd_rc = -EINVAL;
 	func = cmd;
 	if (cmd == ND_CMD_CALL) {
 		call_pkg = buf;
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		 * If we return an error (like elsewhere) then caller wouldn't
 		 * be able to rely upon data returned to make calculation.
 		 */
+		if (cmd_rc)
+			*cmd_rc = 0;
 		return 0;
 	}
 
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
 
 		mutex_lock(&acpi_desc->init_mutex);
 		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-				work_busy(&acpi_desc->dwork.work)
+				acpi_desc->scrub_busy
 				&& !acpi_desc->cancel ? "+\n" : "\n");
 		mutex_unlock(&acpi_desc->init_mutex);
 	}
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
 	return 0;
 }
 
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+	lockdep_assert_held(&acpi_desc->init_mutex);
+
+	acpi_desc->scrub_busy = 1;
+	/* note this should only be set from within the workqueue */
+	if (tmo)
+		acpi_desc->scrub_tmo = tmo;
+	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+	__sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+	lockdep_assert_held(&acpi_desc->init_mutex);
+
+	acpi_desc->scrub_busy = 0;
+	acpi_desc->scrub_count++;
+	if (acpi_desc->scrub_count_state)
+		sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
 static void acpi_nfit_scrub(struct work_struct *work)
 {
 	struct acpi_nfit_desc *acpi_desc;
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
 	mutex_lock(&acpi_desc->init_mutex);
 	query_rc = acpi_nfit_query_poison(acpi_desc);
 	tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
-	if (tmo) {
-		queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
-		acpi_desc->scrub_tmo = tmo;
-	} else {
-		acpi_desc->scrub_count++;
-		if (acpi_desc->scrub_count_state)
-			sysfs_notify_dirent(acpi_desc->scrub_count_state);
-	}
+	if (tmo)
+		__sched_ars(acpi_desc, tmo);
+	else
+		notify_ars_done(acpi_desc);
 	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
 	mutex_unlock(&acpi_desc->init_mutex);
 }
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
 		break;
 	}
 
-	queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+	sched_ars(acpi_desc);
 	return 0;
 }
 
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
 		}
 	}
 	if (scheduled) {
-		queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+		sched_ars(acpi_desc);
 		dev_dbg(dev, "ars_scan triggered\n");
 	}
 	mutex_unlock(&acpi_desc->init_mutex);
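The new __sched_ars()/notify_ars_done() helpers both require init_mutex, and lockdep_assert_held() documents and enforces that contract at run time. A stripped-down sketch of the pattern (names hypothetical; lockdep_assert_held() is the real API):

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(ars_lock);
	static int ars_busy;	/* protected by ars_lock */

	static void set_ars_busy(int v)
	{
		/* Splats under CONFIG_PROVE_LOCKING if a caller forgets the lock. */
		lockdep_assert_held(&ars_lock);
		ars_busy = v;
	}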
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 7d15856a739f..a97ff42fe311 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
 	unsigned int max_ars;
 	unsigned int scrub_count;
 	unsigned int scrub_mode;
+	unsigned int scrub_busy:1;
 	unsigned int cancel:1;
 	unsigned long dimm_cmd_force_en;
 	unsigned long bus_cmd_force_en;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7ca41bf023c9..8df9abfa947b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -45,6 +45,8 @@
 #include <linux/uaccess.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 
+#include "acpica/accommon.h"
+#include "acpica/acnamesp.h"
 #include "internal.h"
 
 #define _COMPONENT		ACPI_OS_SERVICES
@@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 }
 EXPORT_SYMBOL(acpi_check_region);
 
+static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
+					      void *_res, void **return_value)
+{
+	struct acpi_mem_space_context **mem_ctx;
+	union acpi_operand_object *handler_obj;
+	union acpi_operand_object *region_obj2;
+	union acpi_operand_object *region_obj;
+	struct resource *res = _res;
+	acpi_status status;
+
+	region_obj = acpi_ns_get_attached_object(handle);
+	if (!region_obj)
+		return AE_OK;
+
+	handler_obj = region_obj->region.handler;
+	if (!handler_obj)
+		return AE_OK;
+
+	if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+		return AE_OK;
+
+	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
+		return AE_OK;
+
+	region_obj2 = acpi_ns_get_secondary_object(region_obj);
+	if (!region_obj2)
+		return AE_OK;
+
+	mem_ctx = (void *)&region_obj2->extra.region_context;
+
+	if (!(mem_ctx[0]->address >= res->start &&
+	      mem_ctx[0]->address < res->end))
+		return AE_OK;
+
+	status = handler_obj->address_space.setup(region_obj,
+						  ACPI_REGION_DEACTIVATE,
+						  NULL, (void **)mem_ctx);
+	if (ACPI_SUCCESS(status))
+		region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+
+	return status;
+}
+
+/**
+ * acpi_release_memory - Release any mappings done to a memory region
+ * @handle: Handle to namespace node
+ * @res: Memory resource
+ * @level: A level that terminates the search
+ *
+ * Walks through @handle and unmaps all SystemMemory Operation Regions that
+ * overlap with @res and that have already been activated (mapped).
+ *
+ * This is a helper that allows drivers to place special requirements on memory
+ * region that may overlap with operation regions, primarily allowing them to
+ * safely map the region as non-cached memory.
+ *
+ * The unmapped Operation Regions will be automatically remapped next time they
+ * are called, so the drivers do not need to do anything else.
+ */
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+				u32 level)
+{
+	if (!(res->flags & IORESOURCE_MEM))
+		return AE_TYPE;
+
+	return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+				   acpi_deactivate_mem_region, NULL, res, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_release_memory);
+
 /*
  * Let drivers know whether the resource checks are effective
  */
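A hedged usage sketch for the new export: a driver that wants to map a range uncached while it may overlap a SystemMemory OpRegion could release the existing ACPI mappings first. foo_claim_region() and its arguments are hypothetical; acpi_release_memory(), ACPI_HANDLE() and ACPI_UINT32_MAX (unlimited walk depth) are the real interfaces.

	static int foo_claim_region(struct device *dev, struct resource *res)
	{
		acpi_status status;

		/* Unmap any overlapping, already-activated OpRegions. */
		status = acpi_release_memory(ACPI_HANDLE(dev), res, ACPI_UINT32_MAX);
		if (ACPI_FAILURE(status))
			return -ENXIO;

		/* The driver is now free to e.g. ioremap_wc() the range. */
		return 0;
	}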
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index e5ea1974d1e3..d1e26cb599bf 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
 	if (cpu_node) {
 		cpu_node = acpi_find_processor_package_id(table, cpu_node,
 							  level, flag);
-		/* Only the first level has a guaranteed id */
-		if (level == 0)
+		/*
+		 * As per specification if the processor structure represents
+		 * an actual processor, then ACPI processor ID must be valid.
+		 * For processor containers ACPI_PPTT_ACPI_PROCESSOR_ID_VALID
+		 * should be set if the UID is valid
+		 */
+		if (level == 0 ||
+		    cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
 			return cpu_node->acpi_processor_id;
 		return ACPI_PTR_DIFF(cpu_node, table);
 	}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2b16e7c8fff3..39b181d6bd0d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG
 
 config SATA_HIGHBANK
 	tristate "Calxeda Highbank SATA support"
-	depends on HAS_DMA
 	depends on ARCH_HIGHBANK || COMPILE_TEST
 	help
 	  This option enables support for the Calxeda Highbank SoC's
@@ -408,7 +407,6 @@ config SATA_HIGHBANK
 
 config SATA_MV
 	tristate "Marvell SATA support"
-	depends on HAS_DMA
 	depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
 		   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
 	select GENERIC_PHY
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 738fb22978dd..b2b9eba1d214 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
 	{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
 	{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+	{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
 	return strcmp(buf, dmi->driver_data) < 0;
 }
 
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		/* Various Lenovo 50 series have LPM issues with older BIOSen */
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+			},
+			.driver_data = "20180406", /* 1.31 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+			},
+			.driver_data = "20180420", /* 1.28 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+			},
+			.driver_data = "20180315", /* 1.33 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+			},
+			/*
+			 * Note date based on release notes, 2.35 has been
+			 * reported to be good, but I've been unable to get
+			 * a hold of the reporter to get the DMI BIOS date.
+			 * TODO: fix this.
+			 */
+			.driver_data = "20180310", /* 2.35 */
+		},
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	int year, month, date;
+	char buf[9];
+
+	if (!dmi)
+		return false;
+
+	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+	return strcmp(buf, dmi->driver_data) < 0;
+}
+
 static bool ahci_broken_online(struct pci_dev *pdev)
 {
 #define ENCODE_BUSDEVFN(bus, slot, func)			\
@@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			"quirky BIOS, skipping spindown on poweroff\n");
 	}
 
+	if (ahci_broken_lpm(pdev)) {
+		pi.flags |= ATA_FLAG_NO_LPM;
+		dev_warn(&pdev->dev,
+			 "BIOS update required for Link Power Management support\n");
+	}
+
 	if (ahci_broken_suspend(pdev)) {
 		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
 		dev_warn(&pdev->dev,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 0045dacd814b..72d90b4c3aae 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
  *
  * Return: 0 on success; Error code otherwise.
  */
-int ahci_mvebu_stop_engine(struct ata_port *ap)
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 tmp, port_fbs;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 965842a08743..09620c2ffa0f 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
 
 	/* get the slot number from the message */
 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
-	if (pmp < EM_MAX_SLOTS)
+	if (pmp < EM_MAX_SLOTS) {
+		pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
 		emp = &pp->em_priv[pmp];
-	else
+	} else {
 		return -EINVAL;
+	}
 
 	/* mask off the activity bits if we are in sw_activity
 	 * mode, user should turn off sw_activity before setting
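array_index_nospec() clamps the index under speculation after the bounds check, closing the Spectre-v1 window on user-controlled indices. The general pattern, in a self-contained sketch (table/nelems are hypothetical; the helper comes from linux/nospec.h):

	#include <linux/nospec.h>

	static int table_lookup(const int *table, unsigned int nelems,
				unsigned int idx)
	{
		if (idx >= nelems)
			return -EINVAL;

		/* Force idx into [0, nelems) even on speculative paths. */
		idx = array_index_nospec(idx, nelems);
		return table[idx];
	}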
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 27d15ed7fa3d..cc71c63df381 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev)
 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
 		dev->horkage |= ATA_HORKAGE_NOLPM;
 
+	if (ap->flags & ATA_FLAG_NO_LPM)
+		dev->horkage |= ATA_HORKAGE_NOLPM;
+
 	if (dev->horkage & ATA_HORKAGE_NOLPM) {
 		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
 		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index d5412145d76d..01306c018398 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
 	list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
 		struct ata_queued_cmd *qc;
 
-		for (i = 0; i < ATA_MAX_QUEUE; i++) {
-			qc = __ata_qc_from_tag(ap, i);
+		ata_qc_for_each_raw(ap, qc, i) {
 			if (qc->flags & ATA_QCFLAG_ACTIVE &&
 			    qc->scsicmd == scmd)
 				break;
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh);
 
 static int ata_eh_nr_in_flight(struct ata_port *ap)
 {
+	struct ata_queued_cmd *qc;
 	unsigned int tag;
 	int nr = 0;
 
 	/* count only non-internal commands */
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		if (ata_tag_internal(tag))
-			continue;
-		if (ata_qc_from_tag(ap, tag))
+	ata_qc_for_each(ap, qc, tag) {
+		if (qc)
 			nr++;
 	}
 
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
 		goto out_unlock;
 
 	if (cnt == ap->fastdrain_cnt) {
+		struct ata_queued_cmd *qc;
 		unsigned int tag;
 
 		/* No progress during the last interval, tag all
 		 * in-flight qcs as timed out and freeze the port.
 		 */
-		for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+		ata_qc_for_each(ap, qc, tag) {
 			if (qc)
 				qc->err_mask |= AC_ERR_TIMEOUT;
 		}
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
 
 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 {
+	struct ata_queued_cmd *qc;
 	int tag, nr_aborted = 0;
 
 	WARN_ON(!ap->ops->error_handler);
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 	ata_eh_set_pending(ap, 0);
 
 	/* include internal tag in iteration */
-	for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_with_internal(ap, qc, tag) {
 		if (qc && (!link || qc->dev->link == link)) {
 			qc->flags |= ATA_QCFLAG_FAILED;
 			ata_qc_complete(qc);
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
 		return;
 
 	/* has LLDD analyzed already? */
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		qc = __ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_FAILED))
 			continue;
 
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 {
 	struct ata_port *ap = link->ap;
 	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_queued_cmd *qc;
 	struct ata_device *dev;
 	unsigned int all_err_mask = 0, eflags = 0;
 	int tag, nr_failed = 0, nr_quiet = 0;
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 
 	all_err_mask |= ehc->i.err_mask;
 
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
 		    ata_dev_phys_link(qc->dev) != link)
 			continue;
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link)
 {
 	struct ata_port *ap = link->ap;
 	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_queued_cmd *qc;
 	const char *frozen, *desc;
 	char tries_buf[6] = "";
 	int tag, nr_failed = 0;
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link)
 	if (ehc->i.desc[0] != '\0')
 		desc = ehc->i.desc;
 
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
 		    ata_dev_phys_link(qc->dev) != link ||
 		    ((qc->flags & ATA_QCFLAG_QUIET) &&
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link)
 		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
 #endif
 
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+	ata_qc_for_each_raw(ap, qc, tag) {
 		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
 		char data_buf[20] = "";
 		char cdb_buf[70] = "";
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
 */
 void ata_eh_finish(struct ata_port *ap)
 {
+	struct ata_queued_cmd *qc;
 	int tag;
 
 	/* retry or finish qcs */
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_FAILED))
 			continue;
 
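The conversions above rely on the ata_qc_for_each*() iterators added to include/linux/libata.h by the same series. A sketch of their shape, assuming definitions along these lines (not verified against every tree):

	#define __ata_qc_for_each(ap, qc, tag, max_tag, fn)		\
		for ((tag) = 0; (tag) < (max_tag) &&			\
		     ({ qc = fn((ap), (tag)); 1; }); (tag)++)

	/* Iterate raw command slots, ignoring allocation state. */
	#define ata_qc_for_each_raw(ap, qc, tag)			\
		__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)

	/* Iterate allocated commands; qc may be NULL for free tags. */
	#define ata_qc_for_each(ap, qc, tag)				\
		__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)

	/* Same, but also cover the internal tag. */
	#define ata_qc_for_each_with_internal(ap, qc, tag)		\
		__ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)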
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 6a91d04351d9..aad1b01447de 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
 		 */
 		goto invalid_param_len;
 	}
-	if (block > dev->n_sectors)
-		goto out_of_range;
 
 	all = cdb[14] & 0x1;
+	if (all) {
+		/*
+		 * Ignore the block address (zone ID) as defined by ZBC.
+		 */
+		block = 0;
+	} else if (block >= dev->n_sectors) {
+		/*
+		 * Block must be a valid zone ID (a zone start LBA).
+		 */
+		fp = 2;
+		goto invalid_fld;
+	}
 
 	if (ata_ncq_enabled(qc->dev) &&
 	    ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
 invalid_fld:
 	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
 	return 1;
- out_of_range:
-	/* "Logical Block Address out of range" */
-	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
-	return 1;
 invalid_param_len:
 	/* "Parameter list length error" */
 	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b8d9cfc60374..4dc528bf8e85 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag,
 {
 	/* We let libATA core do actual (queue) tag allocation */
 
-	/* all non NCQ/queued commands should have tag#0 */
-	if (ata_tag_internal(tag)) {
-		DPRINTK("mapping internal cmds to tag#0\n");
-		return 0;
-	}
-
 	if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
 		DPRINTK("tag %d invalid : out of range\n", tag);
 		return 0;
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 
 	/* Workaround for data length mismatch errata */
 	if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
-		for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-			qc = ata_qc_from_tag(ap, tag);
+		ata_qc_for_each_with_internal(ap, qc, tag) {
 			if (qc && ata_is_atapi(qc->tf.protocol)) {
 				u32 hcontrol;
 				/* Set HControl[27] to clear error registers */
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 10ae11aa1926..72c9b922a77b 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct nv_adma_port_priv *pp = ap->private_data;
 	struct nv_adma_port_priv *port0, *port1;
-	struct scsi_device *sdev0, *sdev1;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned long segment_boundary, flags;
 	unsigned short sg_tablesize;
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 
 	port0 = ap->host->ports[0]->private_data;
 	port1 = ap->host->ports[1]->private_data;
-	sdev0 = ap->host->ports[0]->link.device[0].sdev;
-	sdev1 = ap->host->ports[1]->link.device[0].sdev;
 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
 		/*
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ff81a576347e..82532c299bb5 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
 	skb_queue_head_init(&iadev->rx_dma_q);
 	iadev->rx_free_desc_qhead = NULL;
 
-	iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
 	if (!iadev->rx_open) {
 		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
 			dev->number);
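
Besides fixing the mixed-up arguments, this conversion buys overflow checking: kcalloc(n, size, flags) takes the element count first and the per-element size second, and returns NULL if n * size would wrap. A minimal sketch (alloc_vc_table() is an illustrative name, not driver code):

#include <linux/slab.h>

/* Zeroed array of num_vc pointers; kcalloc() fails cleanly on
 * multiplication overflow instead of allocating a short buffer.
 */
static void **alloc_vc_table(unsigned int num_vc)
{
	return kcalloc(num_vc, sizeof(void *), GFP_KERNEL);
}
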
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a8d2eb0ceb8d..2c288d1f42bb 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 		return -EFAULT;
 	if (pool < 0 || pool > ZATM_LAST_POOL)
 		return -EINVAL;
+	pool = array_index_nospec(pool,
+				  ZATM_LAST_POOL + 1);
 	if (copy_from_user(&info,
 			   &((struct zatm_pool_req __user *) arg)->info,
 			   sizeof(info))) return -EFAULT;
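
The range check alone does not help on the speculative path: a mispredicted branch can still run ahead with an out-of-range pool index. array_index_nospec(index, size) clamps the value even under speculation. A hedged sketch of the pattern (lookup_pool() is illustrative; the constant is the driver's own):

#include <linux/nospec.h>

/* Validate, then sanitize, then use: after array_index_nospec() the
 * CPU cannot speculatively index past ZATM_LAST_POOL.
 */
static long lookup_pool(int pool, const long *pool_info)
{
	if (pool < 0 || pool > ZATM_LAST_POOL)
		return -EINVAL;
	pool = array_index_nospec(pool, ZATM_LAST_POOL + 1);
	return pool_info[pool];
}
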
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b074f242a435..704f44295810 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -8,10 +8,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
 			   topology.o container.o property.o cacheinfo.o \
 			   devcon.o
 obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y += power/
-obj-$(CONFIG_HAS_DMA) += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_ISA_BUS_API) += isa.o
 obj-y += firmware_loader/
 obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 36622b52e419..df3e1a44707a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -236,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer,
 			link->rpm_active = true;
 		}
 		pm_runtime_new_link(consumer);
+		/*
+		 * If the link is being added by the consumer driver at probe
+		 * time, balance the decrementation of the supplier's runtime PM
+		 * usage counter after consumer probe in driver_probe_device().
+		 */
+		if (consumer->links.status == DL_DEV_PROBING)
+			pm_runtime_get_noresume(supplier);
 	}
 	get_device(supplier);
 	link->supplier = supplier;
@@ -255,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer,
 	switch (consumer->links.status) {
 	case DL_DEV_PROBING:
 		/*
-		 * Balance the decrementation of the supplier's
-		 * runtime PM usage counter after consumer probe
-		 * in driver_probe_device().
+		 * Some callers expect the link creation during
+		 * consumer driver probe to resume the supplier
+		 * even without DL_FLAG_RPM_ACTIVE.
 		 */
 		if (flags & DL_FLAG_PM_RUNTIME)
-			pm_runtime_get_sync(supplier);
+			pm_runtime_resume(supplier);
 
 		link->status = DL_STATE_CONSUMER_PROBE;
 		break;
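
The point of the split above: pm_runtime_get_noresume() only takes a usage-count reference, while pm_runtime_resume() only wakes the supplier; the old pm_runtime_get_sync() did both at once, resuming suppliers even for callers that never asked for DL_FLAG_RPM_ACTIVE semantics. An illustrative pairing (hold_supplier_during_probe() is a made-up name, not core code):

#include <linux/pm_runtime.h>

static void hold_supplier_during_probe(struct device *supplier)
{
	pm_runtime_get_noresume(supplier);	/* reference, no wakeup */
	/* ... consumer probe runs; the core drops it afterwards ... */
	pm_runtime_put(supplier);		/* must stay balanced */
}
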
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
deleted file mode 100644
index 597d40893862..000000000000
--- a/drivers/base/dma-coherent.c
+++ /dev/null
@@ -1,434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Coherent per-device memory handling.
- * Borrowed from i386
- */
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-
-struct dma_coherent_mem {
-	void *virt_base;
-	dma_addr_t device_base;
-	unsigned long pfn_base;
-	int size;
-	int flags;
-	unsigned long *bitmap;
-	spinlock_t spinlock;
-	bool use_dev_dma_pfn_offset;
-};
-
-static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
-
-static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
-{
-	if (dev && dev->dma_mem)
-		return dev->dma_mem;
-	return NULL;
-}
-
-static inline dma_addr_t dma_get_device_base(struct device *dev,
-					     struct dma_coherent_mem * mem)
-{
-	if (mem->use_dev_dma_pfn_offset)
-		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
-	else
-		return mem->device_base;
-}
-
-static int dma_init_coherent_memory(
-	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-	struct dma_coherent_mem **mem)
-{
-	struct dma_coherent_mem *dma_mem = NULL;
-	void __iomem *mem_base = NULL;
-	int pages = size >> PAGE_SHIFT;
-	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-	int ret;
-
-	if (!size) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
-	if (!mem_base) {
-		ret = -EINVAL;
-		goto out;
-	}
-	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-	if (!dma_mem) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!dma_mem->bitmap) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	dma_mem->virt_base = mem_base;
-	dma_mem->device_base = device_addr;
-	dma_mem->pfn_base = PFN_DOWN(phys_addr);
-	dma_mem->size = pages;
-	dma_mem->flags = flags;
-	spin_lock_init(&dma_mem->spinlock);
-
-	*mem = dma_mem;
-	return 0;
-
-out:
-	kfree(dma_mem);
-	if (mem_base)
-		memunmap(mem_base);
-	return ret;
-}
-
-static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
-{
-	if (!mem)
-		return;
-
-	memunmap(mem->virt_base);
-	kfree(mem->bitmap);
-	kfree(mem);
-}
-
-static int dma_assign_coherent_memory(struct device *dev,
-				      struct dma_coherent_mem *mem)
-{
-	if (!dev)
-		return -ENODEV;
-
-	if (dev->dma_mem)
-		return -EBUSY;
-
-	dev->dma_mem = mem;
-	return 0;
-}
-
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-				dma_addr_t device_addr, size_t size, int flags)
-{
-	struct dma_coherent_mem *mem;
-	int ret;
-
-	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
-	if (ret)
-		return ret;
-
-	ret = dma_assign_coherent_memory(dev, mem);
-	if (ret)
-		dma_release_coherent_memory(mem);
-	return ret;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
-	struct dma_coherent_mem *mem = dev->dma_mem;
-
-	if (!mem)
-		return;
-	dma_release_coherent_memory(mem);
-	dev->dma_mem = NULL;
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
-					dma_addr_t device_addr, size_t size)
-{
-	struct dma_coherent_mem *mem = dev->dma_mem;
-	unsigned long flags;
-	int pos, err;
-
-	size += device_addr & ~PAGE_MASK;
-
-	if (!mem)
-		return ERR_PTR(-EINVAL);
-
-	spin_lock_irqsave(&mem->spinlock, flags);
-	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
-	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
-	spin_unlock_irqrestore(&mem->spinlock, flags);
-
-	if (err != 0)
-		return ERR_PTR(err);
-	return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
-				       ssize_t size, dma_addr_t *dma_handle)
-{
-	int order = get_order(size);
-	unsigned long flags;
-	int pageno;
-	void *ret;
-
-	spin_lock_irqsave(&mem->spinlock, flags);
-
-	if (unlikely(size > (mem->size << PAGE_SHIFT)))
-		goto err;
-
-	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
-	if (unlikely(pageno < 0))
-		goto err;
-
-	/*
-	 * Memory was found in the coherent area.
-	 */
-	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-	ret = mem->virt_base + (pageno << PAGE_SHIFT);
-	spin_unlock_irqrestore(&mem->spinlock, flags);
-	memset(ret, 0, size);
-	return ret;
-err:
-	spin_unlock_irqrestore(&mem->spinlock, flags);
-	return NULL;
-}
-
-/**
- * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
- * @dev: device from which we allocate memory
- * @size: size of requested memory area
- * @dma_handle: This will be filled with the correct dma handle
- * @ret: This pointer will be filled with the virtual address
- * to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
-		dma_addr_t *dma_handle, void **ret)
-{
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
-	if (!mem)
-		return 0;
-
-	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
-	if (*ret)
-		return 1;
-
-	/*
-	 * In the case where the allocation can not be satisfied from the
-	 * per-device area, try to fall back to generic memory if the
-	 * constraints allow it.
-	 */
-	return mem->flags & DMA_MEMORY_EXCLUSIVE;
-}
-EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
-
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
-{
-	if (!dma_coherent_default_memory)
-		return NULL;
-
-	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
-					 dma_handle);
-}
-
-static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
-				       int order, void *vaddr)
-{
-	if (mem && vaddr >= mem->virt_base && vaddr <
-		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-		unsigned long flags;
-
-		spin_lock_irqsave(&mem->spinlock, flags);
-		bitmap_release_region(mem->bitmap, page, order);
-		spin_unlock_irqrestore(&mem->spinlock, flags);
-		return 1;
-	}
-	return 0;
-}
-
-/**
- * dma_release_from_dev_coherent() - free memory to device coherent memory pool
- * @dev: device from which the memory was allocated
- * @order: the order of pages allocated
- * @vaddr: virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if the caller should
- * proceed with releasing memory from generic pools.
- */
-int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
-{
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
-	return __dma_release_from_coherent(mem, order, vaddr);
-}
-EXPORT_SYMBOL(dma_release_from_dev_coherent);
-
-int dma_release_from_global_coherent(int order, void *vaddr)
-{
-	if (!dma_coherent_default_memory)
-		return 0;
-
-	return __dma_release_from_coherent(dma_coherent_default_memory, order,
-			vaddr);
-}
-
-static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
-		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
-{
-	if (mem && vaddr >= mem->virt_base && vaddr + size <=
-		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-		unsigned long off = vma->vm_pgoff;
-		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-		int user_count = vma_pages(vma);
-		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-		*ret = -ENXIO;
-		if (off < count && user_count <= count - off) {
-			unsigned long pfn = mem->pfn_base + start + off;
-			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
-					       user_count << PAGE_SHIFT,
-					       vma->vm_page_prot);
-		}
-		return 1;
-	}
-	return 0;
-}
-
-/**
- * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
- * @dev: device from which the memory was allocated
- * @vma: vm_area for the userspace memory
- * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
- * @size: size of the memory buffer allocated
- * @ret: result from remap_pfn_range()
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
- *
- * Returns 1 if @vaddr belongs to the device coherent pool and the caller
- * should return @ret, or 0 if they should proceed with mapping memory from
- * generic areas.
- */
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
-			   void *vaddr, size_t size, int *ret)
-{
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
-	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
-}
-EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
-
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
-				   size_t size, int *ret)
-{
-	if (!dma_coherent_default_memory)
-		return 0;
-
-	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
-					vaddr, size, ret);
-}
-
-/*
- * Support for reserved memory regions defined in device tree
- */
-#ifdef CONFIG_OF_RESERVED_MEM
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
-
-static struct reserved_mem *dma_reserved_default_memory __initdata;
-
-static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
-{
-	struct dma_coherent_mem *mem = rmem->priv;
-	int ret;
-
-	if (!mem) {
-		ret = dma_init_coherent_memory(rmem->base, rmem->base,
-					       rmem->size,
-					       DMA_MEMORY_EXCLUSIVE, &mem);
-		if (ret) {
-			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
-			       &rmem->base, (unsigned long)rmem->size / SZ_1M);
-			return ret;
-		}
-	}
-	mem->use_dev_dma_pfn_offset = true;
-	rmem->priv = mem;
-	dma_assign_coherent_memory(dev, mem);
-	return 0;
-}
-
-static void rmem_dma_device_release(struct reserved_mem *rmem,
-				    struct device *dev)
-{
-	if (dev)
-		dev->dma_mem = NULL;
-}
-
-static const struct reserved_mem_ops rmem_dma_ops = {
-	.device_init = rmem_dma_device_init,
-	.device_release = rmem_dma_device_release,
-};
-
-static int __init rmem_dma_setup(struct reserved_mem *rmem)
-{
-	unsigned long node = rmem->fdt_node;
-
-	if (of_get_flat_dt_prop(node, "reusable", NULL))
-		return -EINVAL;
-
-#ifdef CONFIG_ARM
-	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
-		pr_err("Reserved memory: regions without no-map are not yet supported\n");
-		return -EINVAL;
-	}
-
-	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
-		WARN(dma_reserved_default_memory,
-		     "Reserved memory: region for default DMA coherent area is redefined\n");
-		dma_reserved_default_memory = rmem;
-	}
-#endif
-
-	rmem->ops = &rmem_dma_ops;
-	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
-		&rmem->base, (unsigned long)rmem->size / SZ_1M);
-	return 0;
-}
-
-static int __init dma_init_reserved_memory(void)
-{
-	const struct reserved_mem_ops *ops;
-	int ret;
-
-	if (!dma_reserved_default_memory)
-		return -ENOMEM;
-
-	ops = dma_reserved_default_memory->ops;
-
-	/*
-	 * We rely on rmem_dma_device_init() does not propagate error of
-	 * dma_assign_coherent_memory() for "NULL" device.
-	 */
-	ret = ops->device_init(dma_reserved_default_memory, NULL);
-
-	if (!ret) {
-		dma_coherent_default_memory = dma_reserved_default_memory->priv;
-		pr_info("DMA: default coherent area is set\n");
-	}
-
-	return ret;
-}
-
-core_initcall(dma_init_reserved_memory);
-
-RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
-#endif
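
This file is deleted as part of moving the generic DMA code out of drivers/base (it lives on as kernel/dma/coherent.c upstream), so the driver-facing API is unchanged. Drivers keep consuming a declared or DT-assigned per-device pool through the ordinary allocator; a hedged sketch of that path (get_ring_buffer() is an illustrative name):

#include <linux/dma-mapping.h>

/* dma_alloc_coherent() tries dma_alloc_from_dev_coherent() first and
 * only falls back to generic memory when the pool was not created
 * with DMA_MEMORY_EXCLUSIVE, per the logic shown above.
 */
static void *get_ring_buffer(struct device *dev, size_t size,
			     dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}
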
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
deleted file mode 100644
index d987dcd1bd56..000000000000
--- a/drivers/base/dma-contiguous.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Contiguous Memory Allocator for DMA mapping framework
- * Copyright (c) 2010-2011 by Samsung Electronics.
- * Written by:
- *	Marek Szyprowski <m.szyprowski@samsung.com>
- *	Michal Nazarewicz <mina86@mina86.com>
- */
-
-#define pr_fmt(fmt) "cma: " fmt
-
-#ifdef CONFIG_CMA_DEBUG
-#ifndef DEBUG
-#  define DEBUG
-#endif
-#endif
-
-#include <asm/page.h>
-#include <asm/dma-contiguous.h>
-
-#include <linux/memblock.h>
-#include <linux/err.h>
-#include <linux/sizes.h>
-#include <linux/dma-contiguous.h>
-#include <linux/cma.h>
-
-#ifdef CONFIG_CMA_SIZE_MBYTES
-#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
-#else
-#define CMA_SIZE_MBYTES 0
-#endif
-
-struct cma *dma_contiguous_default_area;
-
-/*
- * Default global CMA area size can be defined in kernel's .config.
- * This is useful mainly for distro maintainers to create a kernel
- * that works correctly for most supported systems.
- * The size can be set in bytes or as a percentage of the total memory
- * in the system.
- *
- * Users, who want to set the size of global CMA area for their system
- * should use cma= kernel parameter.
- */
-static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
-static phys_addr_t size_cmdline = -1;
-static phys_addr_t base_cmdline;
-static phys_addr_t limit_cmdline;
-
-static int __init early_cma(char *p)
-{
-	pr_debug("%s(%s)\n", __func__, p);
-	size_cmdline = memparse(p, &p);
-	if (*p != '@')
-		return 0;
-	base_cmdline = memparse(p + 1, &p);
-	if (*p != '-') {
-		limit_cmdline = base_cmdline + size_cmdline;
-		return 0;
-	}
-	limit_cmdline = memparse(p + 1, &p);
-
-	return 0;
-}
-early_param("cma", early_cma);
-
-#ifdef CONFIG_CMA_SIZE_PERCENTAGE
-
-static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
-{
-	struct memblock_region *reg;
-	unsigned long total_pages = 0;
-
-	/*
-	 * We cannot use memblock_phys_mem_size() here, because
-	 * memblock_analyze() has not been called yet.
-	 */
-	for_each_memblock(memory, reg)
-		total_pages += memblock_region_memory_end_pfn(reg) -
-			       memblock_region_memory_base_pfn(reg);
-
-	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
-}
-
-#else
-
-static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
-{
-	return 0;
-}
-
-#endif
-
-/**
- * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory.
- */
-void __init dma_contiguous_reserve(phys_addr_t limit)
-{
-	phys_addr_t selected_size = 0;
-	phys_addr_t selected_base = 0;
-	phys_addr_t selected_limit = limit;
-	bool fixed = false;
-
-	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
-
-	if (size_cmdline != -1) {
-		selected_size = size_cmdline;
-		selected_base = base_cmdline;
-		selected_limit = min_not_zero(limit_cmdline, limit);
-		if (base_cmdline + size_cmdline == limit_cmdline)
-			fixed = true;
-	} else {
-#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
-		selected_size = size_bytes;
-#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
-		selected_size = cma_early_percent_memory();
-#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
-		selected_size = min(size_bytes, cma_early_percent_memory());
-#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
-		selected_size = max(size_bytes, cma_early_percent_memory());
-#endif
-	}
-
-	if (selected_size && !dma_contiguous_default_area) {
-		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
-			 (unsigned long)selected_size / SZ_1M);
-
-		dma_contiguous_reserve_area(selected_size, selected_base,
-					    selected_limit,
-					    &dma_contiguous_default_area,
-					    fixed);
-	}
-}
-
-/**
- * dma_contiguous_reserve_area() - reserve custom contiguous area
- * @size: Size of the reserved area (in bytes),
- * @base: Base address of the reserved area optional, use 0 for any
- * @limit: End address of the reserved memory (optional, 0 for any).
- * @res_cma: Pointer to store the created cma region.
- * @fixed: hint about where to place the reserved area
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory. This function allows to create custom reserved areas for specific
- * devices.
- *
- * If @fixed is true, reserve contiguous area at exactly @base. If false,
- * reserve in range from @base to @limit.
- */
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-				       phys_addr_t limit, struct cma **res_cma,
-				       bool fixed)
-{
-	int ret;
-
-	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
-				     "reserved", res_cma);
-	if (ret)
-		return ret;
-
-	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(cma_get_base(*res_cma),
-				   cma_get_size(*res_cma));
-
-	return 0;
-}
-
-/**
- * dma_alloc_from_contiguous() - allocate pages from contiguous area
- * @dev: Pointer to device for which the allocation is performed.
- * @count: Requested number of pages.
- * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @gfp_mask: GFP flags to use for this allocation.
- *
- * This function allocates memory buffer for specified device. It uses
- * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific dev_get_cma_area() helper
- * function.
- */
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
-				       unsigned int align, gfp_t gfp_mask)
-{
-	if (align > CONFIG_CMA_ALIGNMENT)
-		align = CONFIG_CMA_ALIGNMENT;
-
-	return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
-}
-
-/**
- * dma_release_from_contiguous() - release allocated pages
- * @dev: Pointer to device for which the pages were allocated.
- * @pages: Allocated pages.
- * @count: Number of allocated pages.
- *
- * This function releases memory allocated by dma_alloc_from_contiguous().
- * It returns false when provided pages do not belong to contiguous area and
- * true otherwise.
- */
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
-				 int count)
-{
-	return cma_release(dev_get_cma_area(dev), pages, count);
-}
-
-/*
- * Support for reserved memory regions defined in device tree
- */
-#ifdef CONFIG_OF_RESERVED_MEM
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
-
-#undef pr_fmt
-#define pr_fmt(fmt) fmt
-
-static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
-{
-	dev_set_cma_area(dev, rmem->priv);
-	return 0;
-}
-
-static void rmem_cma_device_release(struct reserved_mem *rmem,
-				    struct device *dev)
-{
-	dev_set_cma_area(dev, NULL);
-}
-
-static const struct reserved_mem_ops rmem_cma_ops = {
-	.device_init = rmem_cma_device_init,
-	.device_release = rmem_cma_device_release,
-};
-
-static int __init rmem_cma_setup(struct reserved_mem *rmem)
-{
-	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
-	phys_addr_t mask = align - 1;
-	unsigned long node = rmem->fdt_node;
-	struct cma *cma;
-	int err;
-
-	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
-	    of_get_flat_dt_prop(node, "no-map", NULL))
-		return -EINVAL;
-
-	if ((rmem->base & mask) || (rmem->size & mask)) {
-		pr_err("Reserved memory: incorrect alignment of CMA region\n");
-		return -EINVAL;
-	}
-
-	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
-	if (err) {
-		pr_err("Reserved memory: unable to setup CMA region\n");
-		return err;
-	}
-	/* Architecture specific contiguous memory fixup. */
-	dma_contiguous_early_fixup(rmem->base, rmem->size);
-
-	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
-		dma_contiguous_set_default(cma);
-
-	rmem->ops = &rmem_cma_ops;
-	rmem->priv = cma;
-
-	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
-		&rmem->base, (unsigned long)rmem->size / SZ_1M);
-
-	return 0;
-}
-RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
-#endif
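
For reference, early_cma() above implements the grammar cma=size[@base[-limit]] (for example cma=64M@512M). A standalone restatement of that parse, using the same memparse() chaining as the deleted code (parse_cma_arg() is an illustrative name):

#include <linux/kernel.h>

static int parse_cma_arg(char *p, phys_addr_t *size, phys_addr_t *base,
			 phys_addr_t *limit)
{
	*size = memparse(p, &p);	/* "64M" -> bytes, p advances */
	if (*p != '@')
		return 0;
	*base = memparse(p + 1, &p);
	if (*p != '-') {
		*limit = *base + *size;	/* fixed placement */
		return 0;
	}
	*limit = memparse(p + 1, &p);
	return 0;
}
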
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
deleted file mode 100644
index f831a582209c..000000000000
--- a/drivers/base/dma-mapping.c
+++ /dev/null
@@ -1,345 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
- *
- * Copyright (c) 2006  SUSE Linux Products GmbH
- * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
- */
-
-#include <linux/acpi.h>
-#include <linux/dma-mapping.h>
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-/*
- * Managed DMA API
- */
-struct dma_devres {
-	size_t size;
-	void *vaddr;
-	dma_addr_t dma_handle;
-	unsigned long attrs;
-};
-
-static void dmam_release(struct device *dev, void *res)
-{
-	struct dma_devres *this = res;
-
-	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
-			this->attrs);
-}
-
-static int dmam_match(struct device *dev, void *res, void *match_data)
-{
-	struct dma_devres *this = res, *match = match_data;
-
-	if (this->vaddr == match->vaddr) {
-		WARN_ON(this->size != match->size ||
-			this->dma_handle != match->dma_handle);
-		return 1;
-	}
-	return 0;
-}
-
-/**
- * dmam_alloc_coherent - Managed dma_alloc_coherent()
- * @dev: Device to allocate coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_coherent(). Memory allocated using this function
- * will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_coherent(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t gfp)
-{
-	struct dma_devres *dr;
-	void *vaddr;
-
-	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-	if (!dr)
-		return NULL;
-
-	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
-	if (!vaddr) {
-		devres_free(dr);
-		return NULL;
-	}
-
-	dr->vaddr = vaddr;
-	dr->dma_handle = *dma_handle;
-	dr->size = size;
-
-	devres_add(dev, dr);
-
-	return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_coherent);
-
-/**
- * dmam_free_coherent - Managed dma_free_coherent()
- * @dev: Device to free coherent memory for
- * @size: Size of allocation
- * @vaddr: Virtual address of the memory to free
- * @dma_handle: DMA handle of the memory to free
- *
- * Managed dma_free_coherent().
- */
-void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
-			dma_addr_t dma_handle)
-{
-	struct dma_devres match_data = { size, vaddr, dma_handle };
-
-	dma_free_coherent(dev, size, vaddr, dma_handle);
-	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
-}
-EXPORT_SYMBOL(dmam_free_coherent);
-
-/**
- * dmam_alloc_attrs - Managed dma_alloc_attrs()
- * @dev: Device to allocate non_coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- * @attrs: Flags in the DMA_ATTR_* namespace.
- *
- * Managed dma_alloc_attrs(). Memory allocated using this function will be
- * automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
-{
-	struct dma_devres *dr;
-	void *vaddr;
-
-	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-	if (!dr)
-		return NULL;
-
-	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
-	if (!vaddr) {
-		devres_free(dr);
-		return NULL;
-	}
-
-	dr->vaddr = vaddr;
-	dr->dma_handle = *dma_handle;
-	dr->size = size;
-	dr->attrs = attrs;
-
-	devres_add(dev, dr);
-
-	return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_attrs);
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-
-static void dmam_coherent_decl_release(struct device *dev, void *res)
-{
-	dma_release_declared_memory(dev);
-}
-
-/**
- * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
- * @dev: Device to declare coherent memory for
- * @phys_addr: Physical address of coherent memory to be declared
- * @device_addr: Device address of coherent memory to be declared
- * @size: Size of coherent memory to be declared
- * @flags: Flags
- *
- * Managed dma_declare_coherent_memory().
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-				 dma_addr_t device_addr, size_t size, int flags)
-{
-	void *res;
-	int rc;
-
-	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
-					 flags);
-	if (!rc)
-		devres_add(dev, res);
-	else
-		devres_free(res);
-
-	return rc;
-}
-EXPORT_SYMBOL(dmam_declare_coherent_memory);
-
-/**
- * dmam_release_declared_memory - Managed dma_release_declared_memory().
- * @dev: Device to release declared coherent memory for
- *
- * Managed dmam_release_declared_memory().
- */
-void dmam_release_declared_memory(struct device *dev)
-{
-	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
-}
-EXPORT_SYMBOL(dmam_release_declared_memory);
-
-#endif
-
-/*
- * Create scatter-list for the already allocated DMA buffer.
- */
-int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-		 void *cpu_addr, dma_addr_t handle, size_t size)
-{
-	struct page *page = virt_to_page(cpu_addr);
-	int ret;
-
-	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-	if (unlikely(ret))
-		return ret;
-
-	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-	return 0;
-}
-EXPORT_SYMBOL(dma_common_get_sgtable);
-
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	int ret = -ENXIO;
-#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
-	unsigned long user_count = vma_pages(vma);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long off = vma->vm_pgoff;
-
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-
-	if (off < count && user_count <= (count - off))
-		ret = remap_pfn_range(vma, vma->vm_start,
-				      page_to_pfn(virt_to_page(cpu_addr)) + off,
-				      user_count << PAGE_SHIFT,
-				      vma->vm_page_prot);
-#endif	/* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
-
-	return ret;
-}
-EXPORT_SYMBOL(dma_common_mmap);
-
-#ifdef CONFIG_MMU
-static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-			size_t size, unsigned long vm_flags, pgprot_t prot,
-			const void *caller)
-{
-	struct vm_struct *area;
-
-	area = get_vm_area_caller(size, vm_flags, caller);
-	if (!area)
-		return NULL;
-
-	if (map_vm_area(area, prot, pages)) {
-		vunmap(area->addr);
-		return NULL;
-	}
-
-	return area;
-}
-
-/*
- * remaps an array of PAGE_SIZE pages into another vm_area
- * Cannot be used in non-sleeping contexts
- */
-void *dma_common_pages_remap(struct page **pages, size_t size,
-			unsigned long vm_flags, pgprot_t prot,
-			const void *caller)
-{
-	struct vm_struct *area;
-
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-	if (!area)
-		return NULL;
-
-	area->pages = pages;
-
-	return area->addr;
-}
-
-/*
- * remaps an allocated contiguous region into another vm_area.
- * Cannot be used in non-sleeping contexts
- */
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
-			unsigned long vm_flags,
-			pgprot_t prot, const void *caller)
-{
-	int i;
-	struct page **pages;
-	struct vm_struct *area;
-
-	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
-	if (!pages)
-		return NULL;
-
-	for (i = 0; i < (size >> PAGE_SHIFT); i++)
-		pages[i] = nth_page(page, i);
-
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-
-	kfree(pages);
-
-	if (!area)
-		return NULL;
-	return area->addr;
-}
-
-/*
- * unmaps a range previously mapped by dma_common_*_remap
- */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
-{
-	struct vm_struct *area = find_vm_area(cpu_addr);
-
-	if (!area || (area->flags & vm_flags) != vm_flags) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-		return;
-	}
-
-	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
-	vunmap(cpu_addr);
-}
-#endif
-
-/*
- * enables DMA API use for a device
- */
-int dma_configure(struct device *dev)
-{
-	if (dev->bus->dma_configure)
-		return dev->bus->dma_configure(dev);
-	return 0;
-}
-
-void dma_deconfigure(struct device *dev)
-{
-	of_dma_deconfigure(dev);
-	acpi_dma_deconfigure(dev);
-}
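
The managed wrappers deleted here (also relocated to kernel/dma/ upstream) exist so a driver needs no explicit free path; dmam_release() runs on detach. A typical probe-time use, sketched with illustrative names (my_probe() and the SZ_4K descriptor pool are assumptions, not a real driver):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int my_probe(struct device *dev)
{
	dma_addr_t dma;
	void *descs;

	descs = dmam_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
	if (!descs)
		return -ENOMEM;
	/* program `dma` into the device; nothing to undo on remove */
	return 0;
}
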
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 4925af5c4cf0..9e8484189034 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev)
 }
 
 static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
-				 unsigned int index)
+				 unsigned int index, bool power_on)
 {
 	struct of_phandle_args pd_args;
 	struct generic_pm_domain *pd;
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
 	dev->pm_domain->detach = genpd_dev_pm_detach;
 	dev->pm_domain->sync = genpd_dev_pm_sync;
 
-	genpd_lock(pd);
-	ret = genpd_power_on(pd, 0);
-	genpd_unlock(pd);
+	if (power_on) {
+		genpd_lock(pd);
+		ret = genpd_power_on(pd, 0);
+		genpd_unlock(pd);
+	}
 
 	if (ret)
 		genpd_remove_device(pd, dev);
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev)
2307 "#power-domain-cells") != 1) 2309 "#power-domain-cells") != 1)
2308 return 0; 2310 return 0;
2309 2311
2310 return __genpd_dev_pm_attach(dev, dev->of_node, 0); 2312 return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
2311} 2313}
2312EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); 2314EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2313 2315
@@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
 	}
 
 	/* Try to attach the device to the PM domain at the specified index. */
-	ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index);
+	ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
 	if (ret < 1) {
 		device_unregister(genpd_dev);
 		return ret ? ERR_PTR(ret) : NULL;
 	}
 
-	pm_runtime_set_active(genpd_dev);
 	pm_runtime_enable(genpd_dev);
+	genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
 
 	return genpd_dev;
 }
@@ -2487,10 +2489,9 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * power domain corresponding to a DT node's "required-opps" property.
  *
  * @dev: Device for which the performance-state needs to be found.
- * @opp_node: DT node where the "required-opps" property is present. This can be
+ * @np: DT node where the "required-opps" property is present. This can be
  *	the device node itself (if it doesn't have an OPP table) or a node
  *	within the OPP table of a device (if device has an OPP table).
- * @state: Pointer to return performance state.
  *
  * Returns performance state corresponding to the "required-opps" property of
  * a DT node. This calls platform specific genpd->opp_to_performance_state()
@@ -2499,7 +2500,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * Returns performance state on success and 0 on failure.
  */
 unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-		struct device_node *opp_node)
+		struct device_node *np)
 {
 	struct generic_pm_domain *genpd;
 	struct dev_pm_opp *opp;
@@ -2514,7 +2515,7 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev,
 
 	genpd_lock(genpd);
 
-	opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
+	opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
 	if (IS_ERR(opp)) {
 		dev_err(dev, "Failed to find required OPP: %ld\n",
 			PTR_ERR(opp));
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a47e4987ee46..d146fedc38bb 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
 	_drbd_start_io_acct(device, req);
 
 	/* process discards always from our submitter thread */
-	if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) ||
-	    (bio_op(bio) & REQ_OP_DISCARD))
+	if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+	    bio_op(bio) == REQ_OP_DISCARD)
 		goto queue_for_submitter_thread;
 
 	if (rw == WRITE && req->private_bio && req->i.size
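
REQ_OP_* opcodes are small consecutive integers, not one-hot flag bits, so masking with & matches unrelated operations; with REQ_OP_WRITE == 1 and REQ_OP_DISCARD == 3 (the values of this era), the old test sent ordinary writes down the discard path too. Only equality selects exactly the intended opcodes; a sketch (is_discard_or_zeroes() is an illustrative helper, not drbd code):

#include <linux/blk_types.h>

static bool is_discard_or_zeroes(struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
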
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1476cb3439f4..5e793dd7adfb 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio)
 		what = COMPLETED_OK;
 	}
 
-	bio_put(req->private_bio);
 	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
+	bio_put(bio);
 
 	/* not req_mod(), we need irqsave here! */
 	spin_lock_irqsave(&device->resource->req_lock, flags);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d6b6f434fd4b..4cb1d1be3cfb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 		arg = (unsigned long) compat_ptr(arg);
 	case LOOP_SET_FD:
 	case LOOP_CHANGE_FD:
+	case LOOP_SET_BLOCK_SIZE:
 		err = lo_ioctl(bdev, mode, cmd, arg);
 		break;
 	default:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3b7083b8ecbb..74a05561b620 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -76,6 +76,7 @@ struct link_dead_args {
 #define NBD_HAS_CONFIG_REF		4
 #define NBD_BOUND			5
 #define NBD_DESTROY_ON_DISCONNECT	6
+#define NBD_DISCONNECT_ON_CLOSE		7
 
 struct nbd_config {
 	u32 flags;
@@ -138,6 +139,7 @@ static void nbd_config_put(struct nbd_device *nbd);
 static void nbd_connect_reply(struct genl_info *info, int index);
 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
 static void nbd_dead_link_work(struct work_struct *work);
+static void nbd_disconnect_and_put(struct nbd_device *nbd);
 
 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
@@ -1305,6 +1307,12 @@ out:
 static void nbd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct nbd_device *nbd = disk->private_data;
+	struct block_device *bdev = bdget_disk(disk, 0);
+
+	if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+			bdev->bd_openers == 0)
+		nbd_disconnect_and_put(nbd);
+
 	nbd_config_put(nbd);
 	nbd_put(nbd);
 }
@@ -1705,6 +1713,10 @@ again:
 					       &config->runtime_flags);
 			put_dev = true;
 		}
+		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+			set_bit(NBD_DISCONNECT_ON_CLOSE,
+				&config->runtime_flags);
+		}
 	}
 
 	if (info->attrs[NBD_ATTR_SOCKETS]) {
@@ -1749,6 +1761,17 @@ out:
 	return ret;
 }
 
+static void nbd_disconnect_and_put(struct nbd_device *nbd)
+{
+	mutex_lock(&nbd->config_lock);
+	nbd_disconnect(nbd);
+	nbd_clear_sock(nbd);
+	mutex_unlock(&nbd->config_lock);
+	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+			       &nbd->config->runtime_flags))
+		nbd_config_put(nbd);
+}
+
 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nbd_device *nbd;
@@ -1781,13 +1804,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
 		nbd_put(nbd);
 		return 0;
 	}
-	mutex_lock(&nbd->config_lock);
-	nbd_disconnect(nbd);
-	nbd_clear_sock(nbd);
-	mutex_unlock(&nbd->config_lock);
-	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
-			       &nbd->config->runtime_flags))
-		nbd_config_put(nbd);
+	nbd_disconnect_and_put(nbd);
 	nbd_config_put(nbd);
 	nbd_put(nbd);
 	return 0;
@@ -1798,7 +1815,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 	struct nbd_device *nbd = NULL;
 	struct nbd_config *config;
 	int index;
-	int ret = -EINVAL;
+	int ret = 0;
 	bool put_dev = false;
 
 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -1838,6 +1855,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 	    !nbd->task_recv) {
 		dev_err(nbd_to_dev(nbd),
 			"not configured, cannot reconfigure\n");
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -1862,6 +1880,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 					       &config->runtime_flags))
 				refcount_inc(&nbd->refs);
 		}
+
+		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+			set_bit(NBD_DISCONNECT_ON_CLOSE,
+				&config->runtime_flags);
+		} else {
+			clear_bit(NBD_DISCONNECT_ON_CLOSE,
+				  &config->runtime_flags);
+		}
 	}
 
 	if (info->attrs[NBD_ATTR_SOCKETS]) {
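
Unlike the connect path, which only sets the new bit, the reconfigure path must both set and clear it so userspace can toggle the behaviour between sessions. The set-else-clear pattern in isolation (the flag names are the driver's own; the helper name is illustrative):

#include <linux/bitops.h>

static void update_disconnect_on_close(u64 cfg_flags,
				       unsigned long *runtime_flags)
{
	if (cfg_flags & NBD_CFLAG_DISCONNECT_ON_CLOSE)
		set_bit(NBD_DISCONNECT_ON_CLOSE, runtime_flags);
	else
		clear_bit(NBD_DISCONNECT_ON_CLOSE, runtime_flags);
}
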
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 7948049f6c43..042c778e5a4e 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1365,7 +1365,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
 {
 	pr_info("null: rq %p timed out\n", rq);
-	blk_mq_complete_request(rq);
+	__blk_complete_request(rq);
 	return BLK_EH_DONE;
 }
 
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 14d159e2042d..2dc33e65d2d0 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -29,7 +29,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/unaligned/le_struct.h>
+#include <asm/unaligned.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 1cc29629d238..80d60f43db56 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata)
169 const char *name; 169 const char *name;
170 int nr_fck = 0, nr_ick = 0, i, error = 0; 170 int nr_fck = 0, nr_ick = 0, i, error = 0;
171 171
172 ddata->clock_roles = devm_kzalloc(ddata->dev, 172 ddata->clock_roles = devm_kcalloc(ddata->dev,
173 sizeof(*ddata->clock_roles) *
174 SYSC_MAX_CLOCKS, 173 SYSC_MAX_CLOCKS,
174 sizeof(*ddata->clock_roles),
175 GFP_KERNEL); 175 GFP_KERNEL);
176 if (!ddata->clock_roles) 176 if (!ddata->clock_roles)
177 return -ENOMEM; 177 return -ENOMEM;
@@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata)
 		return -EINVAL;
 	}
 
-	ddata->clocks = devm_kzalloc(ddata->dev,
-				     sizeof(*ddata->clocks) * ddata->nr_clocks,
+	ddata->clocks = devm_kcalloc(ddata->dev,
+				     ddata->nr_clocks, sizeof(*ddata->clocks),
 				     GFP_KERNEL);
 	if (!ddata->clocks)
 		return -ENOMEM;
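
Same 2-factor conversion as elsewhere in this series, with a devm_ twist: devm_kcalloc(dev, n, size, gfp) adds overflow checking and ties the allocation's lifetime to the device, so no kfree() is needed in error or remove paths. A sketch (alloc_role_table() is an illustrative wrapper, not driver code):

#include <linux/device.h>

static u32 *alloc_role_table(struct device *dev, size_t nr_roles)
{
	return devm_kcalloc(dev, nr_roles, sizeof(u32), GFP_KERNEL);
}
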
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 91bb98c42a1c..aaf9e5afaad4 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
+	int err;
+
 	mutex_lock(&rng_mutex);
 
 	list_del(&rng->list);
-	if (current_rng == rng)
-		enable_best_rng();
+	if (current_rng == rng) {
+		err = enable_best_rng();
+		if (err) {
+			drop_current_rng();
+			cur_rng_set_by_user = 0;
+		}
+	}
 
 	if (list_empty(&rng_list)) {
 		mutex_unlock(&rng_mutex);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index ad353be871bf..90ec010bffbd 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi)
 	return 0;
 
 out_err:
-	ipmi_unregister_smi(new_smi->intf);
-	new_smi->intf = NULL;
+	if (new_smi->intf) {
+		ipmi_unregister_smi(new_smi->intf);
+		new_smi->intf = NULL;
+	}
 
 	kfree(init_name);
 
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index fbfc05e3f3d1..bb882ab161fe 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
 int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
 {
 	unsigned long flags;
-	int ret = 0;
+	int ret = -ENODATA;
 	u8 status;
 
 	spin_lock_irqsave(&kcs_bmc->lock, flags);
 
-	if (!kcs_bmc->running) {
-		kcs_force_abort(kcs_bmc);
-		ret = -ENODEV;
-		goto out_unlock;
-	}
-
-	status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT);
-
-	switch (status) {
-	case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
-		kcs_bmc_handle_cmd(kcs_bmc);
-		break;
-
-	case KCS_STATUS_IBF:
-		kcs_bmc_handle_data(kcs_bmc);
-		break;
+	status = read_status(kcs_bmc);
+	if (status & KCS_STATUS_IBF) {
+		if (!kcs_bmc->running)
+			kcs_force_abort(kcs_bmc);
+		else if (status & KCS_STATUS_CMD_DAT)
+			kcs_bmc_handle_cmd(kcs_bmc);
+		else
+			kcs_bmc_handle_data(kcs_bmc);
 
-	default:
-		ret = -ENODATA;
-		break;
+		ret = 0;
 	}
 
-out_unlock:
 	spin_unlock_irqrestore(&kcs_bmc->lock, flags);
 
 	return ret;
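
With -ENODATA now meaning "IBF was clear, nothing for us", a shared or spurious interrupt can be declined instead of being claimed while the channel is closed. An assumed IRQ-side counterpart (sketch_kcs_irq() is illustrative; the real handlers live in the SoC front-end drivers):

#include <linux/interrupt.h>

static irqreturn_t sketch_kcs_irq(int irq, void *arg)
{
	struct kcs_bmc *kcs_bmc = arg;

	/* 0 means the event was consumed; -ENODATA means not ours */
	return kcs_bmc_handle_event(kcs_bmc) == 0 ? IRQ_HANDLED : IRQ_NONE;
}
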
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a8fb0020ba5c..cd888d4ee605 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -402,7 +402,8 @@ static struct poolinfo {
402/* 402/*
403 * Static global variables 403 * Static global variables
404 */ 404 */
405static DECLARE_WAIT_QUEUE_HEAD(random_wait); 405static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
406static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
406static struct fasync_struct *fasync; 407static struct fasync_struct *fasync;
407 408
408static DEFINE_SPINLOCK(random_ready_list_lock); 409static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -721,8 +722,8 @@ retry:
 
 	/* should we wake readers? */
 	if (entropy_bits >= random_read_wakeup_bits &&
-	    wq_has_sleeper(&random_wait)) {
-		wake_up_interruptible_poll(&random_wait, POLLIN);
+	    wq_has_sleeper(&random_read_wait)) {
+		wake_up_interruptible(&random_read_wait);
 		kill_fasync(&fasync, SIGIO, POLL_IN);
 	}
 	/* If the input pool is getting full, send some
@@ -1396,7 +1397,7 @@ retry:
 	trace_debit_entropy(r->name, 8 * ibytes);
 	if (ibytes &&
 	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
-		wake_up_interruptible_poll(&random_wait, POLLOUT);
+		wake_up_interruptible(&random_write_wait);
 		kill_fasync(&fasync, SIGIO, POLL_OUT);
 	}
 
@@ -1838,7 +1839,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes)
 	if (nonblock)
 		return -EAGAIN;
 
-	wait_event_interruptible(random_wait,
+	wait_event_interruptible(random_read_wait,
 		ENTROPY_BITS(&input_pool) >=
 		random_read_wakeup_bits);
 	if (signal_pending(current))
@@ -1875,17 +1876,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 	return ret;
 }
 
-static struct wait_queue_head *
-random_get_poll_head(struct file *file, __poll_t events)
-{
-	return &random_wait;
-}
-
 static __poll_t
-random_poll_mask(struct file *file, __poll_t events)
+random_poll(struct file *file, poll_table * wait)
 {
-	__poll_t mask = 0;
+	__poll_t mask;
 
+	poll_wait(file, &random_read_wait, wait);
+	poll_wait(file, &random_write_wait, wait);
+	mask = 0;
 	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
 		mask |= EPOLLIN | EPOLLRDNORM;
 	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
@@ -1992,8 +1990,7 @@ static int random_fasync(int fd, struct file *filp, int on)
 const struct file_operations random_fops = {
 	.read = random_read,
 	.write = random_write,
-	.get_poll_head = random_get_poll_head,
-	.poll_mask = random_poll_mask,
+	.poll = random_poll,
 	.unlocked_ioctl = random_ioctl,
 	.fasync = random_fasync,
 	.llseek = noop_llseek,
@@ -2326,7 +2323,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
 	 * We'll be woken up again once below random_write_wakeup_thresh,
 	 * or when the calling thread is about to terminate.
 	 */
-	wait_event_interruptible(random_wait, kthread_should_stop() ||
+	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
 			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
 	mix_pool_bytes(poolp, buffer, count);
 	credit_entropy_bits(poolp, entropy);
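
With separate read and write queues, the classic ->poll() contract restored above is: register on every queue whose wakeup could change readiness, then report the current mask. The shape in isolation (sketch_poll() and its readiness flags are illustrative, not the driver's exact code):

#include <linux/poll.h>

static __poll_t sketch_poll(struct file *file, poll_table *wait,
			    wait_queue_head_t *rd, wait_queue_head_t *wr,
			    bool readable, bool writable)
{
	__poll_t mask = 0;

	poll_wait(file, rd, wait);	/* register on both queues */
	poll_wait(file, wr, wait);
	if (readable)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (writable)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}
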
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ae40cbe770f0..0bb25dd009d1 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD) += sprd/
 obj-$(CONFIG_ARCH_STI) += st/
 obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
 obj-$(CONFIG_ARCH_SUNXI) += sunxi/
-obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/
+obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
 obj-$(CONFIG_ARCH_TEGRA) += tegra/
 obj-y += ti/
 obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index aae62a5b8734..d1bbee19ed0f 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
 
 	usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
 	if (IS_ERR(usb1)) {
-		if (PTR_ERR(usb0) == -EPROBE_DEFER)
+		if (PTR_ERR(usb1) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 
 		dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index 6a42529d31a9..cc5614567a70 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data;
 #ifdef CONFIG_ARCH_DAVINCI_DM355
 extern const struct davinci_psc_init_data dm355_psc_init_data;
 #endif
-#ifdef CONFIG_ARCH_DAVINCI_DM356
+#ifdef CONFIG_ARCH_DAVINCI_DM365
 extern const struct davinci_psc_init_data dm365_psc_init_data;
 #endif
 #ifdef CONFIG_ARCH_DAVINCI_DM644x
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index acaa14cfa25c..49454700f2e5 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,24 +1,24 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Common objects 2# Common objects
3lib-$(CONFIG_SUNXI_CCU) += ccu_common.o 3obj-y += ccu_common.o
4lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o 4obj-y += ccu_mmc_timing.o
5lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o 5obj-y += ccu_reset.o
6 6
7# Base clock types 7# Base clock types
8lib-$(CONFIG_SUNXI_CCU) += ccu_div.o 8obj-y += ccu_div.o
9lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o 9obj-y += ccu_frac.o
10lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o 10obj-y += ccu_gate.o
11lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o 11obj-y += ccu_mux.o
12lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o 12obj-y += ccu_mult.o
13lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o 13obj-y += ccu_phase.o
14lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o 14obj-y += ccu_sdm.o
15 15
16# Multi-factor clocks 16# Multi-factor clocks
17lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o 17obj-y += ccu_nk.o
18lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o 18obj-y += ccu_nkm.o
19lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o 19obj-y += ccu_nkmp.o
20lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o 20obj-y += ccu_nm.o
21lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o 21obj-y += ccu_mp.o
22 22
23# SoC support 23# SoC support
24obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o 24obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o
38obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o 38obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
39obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o 39obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
40obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o 40obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
41
42# The lib-y file goal is supposed to work only in arch/*/lib or lib/. In our
43# case, we want to use that goal, but even though lib.a will be properly
44# generated, it will not be linked in, eventually resulting in a linker error
45# for missing symbols.
46#
47# We can work around that by explicitly adding lib.a to the obj-y goal. This is
48# an undocumented behaviour, but works well for now.
49obj-$(CONFIG_SUNXI_CCU) += lib.a
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 57cb2f00fc07..d8c7f5750cdb 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type,
735 clk->features |= CLOCK_EVT_FEAT_DYNIRQ; 735 clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
736 clk->name = "arch_mem_timer"; 736 clk->name = "arch_mem_timer";
737 clk->rating = 400; 737 clk->rating = 400;
738 clk->cpumask = cpu_all_mask; 738 clk->cpumask = cpu_possible_mask;
739 if (arch_timer_mem_use_virtual) { 739 if (arch_timer_mem_use_virtual) {
740 clk->set_state_shutdown = arch_timer_shutdown_virt_mem; 740 clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
741 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; 741 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index e5cdc3af684c..2717f88c7904 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -304,8 +304,10 @@ static int __init stm32_timer_init(struct device_node *node)
304 304
305 to->private_data = kzalloc(sizeof(struct stm32_timer_private), 305 to->private_data = kzalloc(sizeof(struct stm32_timer_private),
306 GFP_KERNEL); 306 GFP_KERNEL);
307 if (!to->private_data) 307 if (!to->private_data) {
308 ret = -ENOMEM;
308 goto deinit; 309 goto deinit;
310 }
309 311
310 rstc = of_reset_control_get(node, NULL); 312 rstc = of_reset_control_get(node, NULL);
311 if (!IS_ERR(rstc)) { 313 if (!IS_ERR(rstc)) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1de5ec8d5ea3..ece120da3353 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -294,6 +294,7 @@ struct pstate_funcs {
294static struct pstate_funcs pstate_funcs __read_mostly; 294static struct pstate_funcs pstate_funcs __read_mostly;
295 295
296static int hwp_active __read_mostly; 296static int hwp_active __read_mostly;
297static int hwp_mode_bdw __read_mostly;
297static bool per_cpu_limits __read_mostly; 298static bool per_cpu_limits __read_mostly;
298static bool hwp_boost __read_mostly; 299static bool hwp_boost __read_mostly;
299 300
@@ -1413,7 +1414,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1413 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 1414 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
1414 cpu->pstate.scaling = pstate_funcs.get_scaling(); 1415 cpu->pstate.scaling = pstate_funcs.get_scaling();
1415 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; 1416 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
1416 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1417
1418 if (hwp_active && !hwp_mode_bdw) {
1419 unsigned int phy_max, current_max;
1420
1421 intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
1422 cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
1423 } else {
1424 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1425 }
1417 1426
1418 if (pstate_funcs.get_aperf_mperf_shift) 1427 if (pstate_funcs.get_aperf_mperf_shift)
1419 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); 1428 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2467,28 +2476,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
2467static inline void intel_pstate_request_control_from_smm(void) {} 2476static inline void intel_pstate_request_control_from_smm(void) {}
2468#endif /* CONFIG_ACPI */ 2477#endif /* CONFIG_ACPI */
2469 2478
2479#define INTEL_PSTATE_HWP_BROADWELL 0x01
2480
2481#define ICPU_HWP(model, hwp_mode) \
2482 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
2483
2470static const struct x86_cpu_id hwp_support_ids[] __initconst = { 2484static const struct x86_cpu_id hwp_support_ids[] __initconst = {
2471 { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP }, 2485 ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
2486 ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
2487 ICPU_HWP(X86_MODEL_ANY, 0),
2472 {} 2488 {}
2473}; 2489};
2474 2490
2475static int __init intel_pstate_init(void) 2491static int __init intel_pstate_init(void)
2476{ 2492{
2493 const struct x86_cpu_id *id;
2477 int rc; 2494 int rc;
2478 2495
2479 if (no_load) 2496 if (no_load)
2480 return -ENODEV; 2497 return -ENODEV;
2481 2498
2482 if (x86_match_cpu(hwp_support_ids)) { 2499 id = x86_match_cpu(hwp_support_ids);
2500 if (id) {
2483 copy_cpu_funcs(&core_funcs); 2501 copy_cpu_funcs(&core_funcs);
2484 if (!no_hwp) { 2502 if (!no_hwp) {
2485 hwp_active++; 2503 hwp_active++;
2504 hwp_mode_bdw = id->driver_data;
2486 intel_pstate.attr = hwp_cpufreq_attrs; 2505 intel_pstate.attr = hwp_cpufreq_attrs;
2487 goto hwp_cpu_matched; 2506 goto hwp_cpu_matched;
2488 } 2507 }
2489 } else { 2508 } else {
2490 const struct x86_cpu_id *id;
2491
2492 id = x86_match_cpu(intel_pstate_cpu_ids); 2509 id = x86_match_cpu(intel_pstate_cpu_ids);
2493 if (!id) 2510 if (!id)
2494 return -ENODEV; 2511 return -ENODEV;
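The intel_pstate rework attaches per-model data to the HWP match table so Broadwell Xeon parts can be flagged (hwp_mode_bdw) in the same lookup that detects HWP, with the X86_MODEL_ANY entry preserving the old catch-all behaviour. A reduced model of matching with per-entry driver_data follows; the model numbers are the usual FAM6 values but should be treated as illustrative here:

    #include <stdio.h>

    #define MODEL_ANY     0xff
    #define HWP_BROADWELL 0x01

    struct cpu_id {
        unsigned char model;
        unsigned long driver_data;
    };

    static const struct cpu_id *match_cpu(unsigned char model)
    {
        /* Ordered like hwp_support_ids: specific models first,
         * the catch-all entry last. */
        static const struct cpu_id hwp_ids[] = {
            { 0x4f /* BROADWELL_X */,      HWP_BROADWELL },
            { 0x56 /* BROADWELL_XEON_D */, HWP_BROADWELL },
            { MODEL_ANY,                   0 },
        };
        unsigned int i;

        for (i = 0; i < sizeof(hwp_ids) / sizeof(hwp_ids[0]); i++)
            if (hwp_ids[i].model == model || hwp_ids[i].model == MODEL_ANY)
                return &hwp_ids[i];
        return NULL;
    }

    int main(void)
    {
        const struct cpu_id *id = match_cpu(0x4f);

        if (id)    /* the driver does hwp_mode_bdw = id->driver_data */
            printf("matched, driver_data=%#lx\n", id->driver_data);
        return 0;
    }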
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index d049fe4b80c4..29389accf3e9 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -42,6 +42,8 @@ enum _msm8996_version {
42 NUM_OF_MSM8996_VERSIONS, 42 NUM_OF_MSM8996_VERSIONS,
43}; 43};
44 44
45struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
46
45static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) 47static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
46{ 48{
47 size_t len; 49 size_t len;
@@ -74,7 +76,6 @@ static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
74static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) 76static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
75{ 77{
76 struct opp_table *opp_tables[NR_CPUS] = {0}; 78 struct opp_table *opp_tables[NR_CPUS] = {0};
77 struct platform_device *cpufreq_dt_pdev;
78 enum _msm8996_version msm8996_version; 79 enum _msm8996_version msm8996_version;
79 struct nvmem_cell *speedbin_nvmem; 80 struct nvmem_cell *speedbin_nvmem;
80 struct device_node *np; 81 struct device_node *np;
@@ -86,8 +87,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
86 int ret; 87 int ret;
87 88
88 cpu_dev = get_cpu_device(0); 89 cpu_dev = get_cpu_device(0);
89 if (NULL == cpu_dev) 90 if (!cpu_dev)
90 ret = -ENODEV; 91 return -ENODEV;
91 92
92 msm8996_version = qcom_cpufreq_kryo_get_msm_id(); 93 msm8996_version = qcom_cpufreq_kryo_get_msm_id();
93 if (NUM_OF_MSM8996_VERSIONS == msm8996_version) { 94 if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
@@ -96,8 +97,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
96 } 97 }
97 98
98 np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); 99 np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
99 if (IS_ERR(np)) 100 if (!np)
100 return PTR_ERR(np); 101 return -ENOENT;
101 102
102 ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu"); 103 ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
103 if (!ret) { 104 if (!ret) {
@@ -115,6 +116,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
115 116
116 speedbin = nvmem_cell_read(speedbin_nvmem, &len); 117 speedbin = nvmem_cell_read(speedbin_nvmem, &len);
117 nvmem_cell_put(speedbin_nvmem); 118 nvmem_cell_put(speedbin_nvmem);
119 if (IS_ERR(speedbin))
120 return PTR_ERR(speedbin);
118 121
119 switch (msm8996_version) { 122 switch (msm8996_version) {
120 case MSM8996_V3: 123 case MSM8996_V3:
@@ -127,6 +130,7 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
127 BUG(); 130 BUG();
128 break; 131 break;
129 } 132 }
133 kfree(speedbin);
130 134
131 for_each_possible_cpu(cpu) { 135 for_each_possible_cpu(cpu) {
132 cpu_dev = get_cpu_device(cpu); 136 cpu_dev = get_cpu_device(cpu);
@@ -162,8 +166,15 @@ free_opp:
162 return ret; 166 return ret;
163} 167}
164 168
169static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
170{
171 platform_device_unregister(cpufreq_dt_pdev);
172 return 0;
173}
174
165static struct platform_driver qcom_cpufreq_kryo_driver = { 175static struct platform_driver qcom_cpufreq_kryo_driver = {
166 .probe = qcom_cpufreq_kryo_probe, 176 .probe = qcom_cpufreq_kryo_probe,
177 .remove = qcom_cpufreq_kryo_remove,
167 .driver = { 178 .driver = {
168 .name = "qcom-cpufreq-kryo", 179 .name = "qcom-cpufreq-kryo",
169 }, 180 },
@@ -198,8 +209,9 @@ static int __init qcom_cpufreq_kryo_init(void)
198 if (unlikely(ret < 0)) 209 if (unlikely(ret < 0))
199 return ret; 210 return ret;
200 211
201 ret = PTR_ERR_OR_ZERO(platform_device_register_simple( 212 kryo_cpufreq_pdev = platform_device_register_simple(
202 "qcom-cpufreq-kryo", -1, NULL, 0)); 213 "qcom-cpufreq-kryo", -1, NULL, 0);
214 ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
203 if (0 == ret) 215 if (0 == ret)
204 return 0; 216 return 0;
205 217
@@ -208,5 +220,12 @@ static int __init qcom_cpufreq_kryo_init(void)
208} 220}
209module_init(qcom_cpufreq_kryo_init); 221module_init(qcom_cpufreq_kryo_init);
210 222
223static void __exit qcom_cpufreq_kryo_exit(void)
224{
225 platform_device_unregister(kryo_cpufreq_pdev);
226 platform_driver_unregister(&qcom_cpufreq_kryo_driver);
227}
228module_exit(qcom_cpufreq_kryo_exit);
229
211MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver"); 230MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
212MODULE_LICENSE("GPL v2"); 231MODULE_LICENSE("GPL v2");
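The kryo changes plug several holes in one go: the missing return after a failed get_cpu_device(), an unchecked ERR_PTR return from nvmem_cell_read(), the leaked speedbin buffer, and the absence of remove/exit teardown mirroring probe/init. The check-use-free ordering for the nvmem buffer, in a standalone sketch (same local ERR_PTR stand-ins as in the earlier example):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    /* Stand-in for nvmem_cell_read(): heap buffer on success, ERR_PTR()
     * on failure. Error handling of malloc itself is elided here. */
    static unsigned char *read_speedbin(size_t *len, int fail)
    {
        unsigned char *buf;

        if (fail)
            return ERR_PTR(-5 /* -EIO */);
        *len = 1;
        buf = malloc(*len);
        buf[0] = 0x2;    /* speed-bin fuse value */
        return buf;
    }

    int main(void)
    {
        size_t len;
        unsigned char *speedbin = read_speedbin(&len, 0);
        unsigned int bitmap;

        if (IS_ERR(speedbin))    /* the patch adds this check */
            return (int)-PTR_ERR(speedbin);

        bitmap = 1u << speedbin[0];    /* last use of the buffer... */
        free(speedbin);                /* ...so free it here, not never */
        printf("versions bitmap: %#x\n", bitmap);
        return 0;
    }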
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 00c7aab8e7d0..afebbd87c4aa 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1548,15 +1548,14 @@ skip_copy:
1548 tp->urg_data = 0; 1548 tp->urg_data = 0;
1549 1549
1550 if ((avail + offset) >= skb->len) { 1550 if ((avail + offset) >= skb->len) {
1551 if (likely(skb))
1552 chtls_free_skb(sk, skb);
1553 buffers_freed++;
1554 if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) { 1551 if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
1555 tp->copied_seq += skb->len; 1552 tp->copied_seq += skb->len;
1556 hws->rcvpld = skb->hdr_len; 1553 hws->rcvpld = skb->hdr_len;
1557 } else { 1554 } else {
1558 tp->copied_seq += hws->rcvpld; 1555 tp->copied_seq += hws->rcvpld;
1559 } 1556 }
1557 chtls_free_skb(sk, skb);
1558 buffers_freed++;
1560 hws->copied_seq = 0; 1559 hws->copied_seq = 0;
1561 if (copied >= target && 1560 if (copied >= target &&
1562 !skb_peek(&sk->sk_receive_queue)) 1561 !skb_peek(&sk->sk_receive_queue))
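The chtls change fixes a use-after-free: the skb was freed first and its flags and len read afterwards (the likely(skb) test was also dead code, since the branch condition had already dereferenced skb). Reduced to plain C, the corrected ordering looks like this:

    #include <stdio.h>
    #include <stdlib.h>

    struct sk_buff {
        unsigned int len;
        unsigned int flags;
    };

    static void consume(struct sk_buff *skb, unsigned int *copied_seq)
    {
        /* Fixed order: read every field that is still needed... */
        if (skb->flags & 0x1 /* ULPCB_FLAG_TLS_HDR */)
            *copied_seq += skb->len;

        /* ...and free only once nothing will touch the object again.
         * The buggy version called the free before the reads above. */
        free(skb);
    }

    int main(void)
    {
        struct sk_buff *skb = malloc(sizeof(*skb));
        unsigned int copied_seq = 0;

        skb->len = 1448;
        skb->flags = 0x1;
        consume(skb, &copied_seq);
        printf("copied_seq = %u\n", copied_seq);
        return 0;
    }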
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index de2f8297a210..108c37fca782 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
189 189
190 /* prevent private mappings from being established */ 190 /* prevent private mappings from being established */
191 if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { 191 if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
192 dev_info(dev, "%s: %s: fail, attempted private mapping\n", 192 dev_info_ratelimited(dev,
193 "%s: %s: fail, attempted private mapping\n",
193 current->comm, func); 194 current->comm, func);
194 return -EINVAL; 195 return -EINVAL;
195 } 196 }
196 197
197 mask = dax_region->align - 1; 198 mask = dax_region->align - 1;
198 if (vma->vm_start & mask || vma->vm_end & mask) { 199 if (vma->vm_start & mask || vma->vm_end & mask) {
199 dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", 200 dev_info_ratelimited(dev,
201 "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
200 current->comm, func, vma->vm_start, vma->vm_end, 202 current->comm, func, vma->vm_start, vma->vm_end,
201 mask); 203 mask);
202 return -EINVAL; 204 return -EINVAL;
@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
204 206
205 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV 207 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
206 && (vma->vm_flags & VM_DONTCOPY) == 0) { 208 && (vma->vm_flags & VM_DONTCOPY) == 0) {
207 dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n", 209 dev_info_ratelimited(dev,
210 "%s: %s: fail, dax range requires MADV_DONTFORK\n",
208 current->comm, func); 211 current->comm, func);
209 return -EINVAL; 212 return -EINVAL;
210 } 213 }
211 214
212 if (!vma_is_dax(vma)) { 215 if (!vma_is_dax(vma)) {
213 dev_info(dev, "%s: %s: fail, vma is not DAX capable\n", 216 dev_info_ratelimited(dev,
217 "%s: %s: fail, vma is not DAX capable\n",
214 current->comm, func); 218 current->comm, func);
215 return -EINVAL; 219 return -EINVAL;
216 } 220 }
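check_vma() runs on every mmap() attempt, so an application retrying in a loop could previously flood the kernel log; dev_info_ratelimited() caps that. The kernel's ratelimit is token-based and per-callsite; a much cruder fixed-window sketch of the same idea:

    #include <stdio.h>
    #include <time.h>

    /* Allow at most 'burst' messages per 'interval' seconds. */
    static int ratelimit(void)
    {
        static time_t window_start;
        static int printed;
        const int interval = 5, burst = 10;
        time_t now = time(NULL);

        if (now - window_start >= interval) {
            window_start = now;
            printed = 0;
        }
        return printed++ < burst;
    }

    int main(void)
    {
        int i, shown = 0;

        for (i = 0; i < 1000; i++)
            if (ratelimit()) {
                printf("fail, attempted private mapping\n");
                shown++;
            }
        printf("%d of 1000 messages emitted\n", shown);
        return 0;
    }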
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 903d9c473749..45276abf03aa 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
86{ 86{
87 struct dax_device *dax_dev; 87 struct dax_device *dax_dev;
88 bool dax_enabled = false; 88 bool dax_enabled = false;
89 struct request_queue *q;
89 pgoff_t pgoff; 90 pgoff_t pgoff;
90 int err, id; 91 int err, id;
91 void *kaddr; 92 void *kaddr;
@@ -99,6 +100,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
99 return false; 100 return false;
100 } 101 }
101 102
103 q = bdev_get_queue(bdev);
104 if (!q || !blk_queue_dax(q)) {
105 pr_debug("%s: error: request queue doesn't support dax\n",
106 bdevname(bdev, buf));
107 return false;
108 }
109
102 err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff); 110 err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
103 if (err) { 111 if (err) {
104 pr_debug("%s: error: unaligned partition for dax\n", 112 pr_debug("%s: error: unaligned partition for dax\n",
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index fa31cccbe04f..6bfa217ed6d0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
794 struct k3_dma_dev *d = ofdma->of_dma_data; 794 struct k3_dma_dev *d = ofdma->of_dma_data;
795 unsigned int request = dma_spec->args[0]; 795 unsigned int request = dma_spec->args[0];
796 796
797 if (request > d->dma_requests) 797 if (request >= d->dma_requests)
798 return NULL; 798 return NULL;
799 799
800 return dma_get_slave_channel(&(d->chans[request].vc.chan)); 800 return dma_get_slave_channel(&(d->chans[request].vc.chan));
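Valid request lines run from 0 to d->dma_requests - 1, so the old '>' comparison accepted request == d->dma_requests and indexed one slot past the end of d->chans. The off-by-one, spelled out with a made-up size:

    #include <stdio.h>

    #define DMA_REQUESTS 16

    int main(void)
    {
        unsigned int chans[DMA_REQUESTS];
        unsigned int request = DMA_REQUESTS;    /* first invalid value */

        /* Old check: request > DMA_REQUESTS -> 16 > 16 is false, so
         * chans[16] would be read, one element past the array.
         * The fixed check rejects it: */
        if (request >= DMA_REQUESTS) {
            printf("request %u rejected (valid: 0..%u)\n",
                   request, DMA_REQUESTS - 1);
            return 0;
        }
        printf("chan = %u\n", chans[request]);
        return 0;
    }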
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index defcdde4d358..de0957fe9668 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
3033 pd->src_addr_widths = PL330_DMA_BUSWIDTHS; 3033 pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
3034 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; 3034 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
3035 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 3035 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3036 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 3036 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3037 pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ? 3037 pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
3038 1 : PL330_MAX_BURST); 3038 1 : PL330_MAX_BURST);
3039 3039
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 9b5ca8691f27..a4a931ddf6f6 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev)
1485 od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; 1485 od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
1486 od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; 1486 od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
1487 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 1487 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1488 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1488 if (__dma_omap15xx(od->plat->dma_attr))
1489 od->ddev.residue_granularity =
1490 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1491 else
1492 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1489 od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ 1493 od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
1490 od->ddev.dev = &pdev->dev; 1494 od->ddev.dev = &pdev->dev;
1491 INIT_LIST_HEAD(&od->ddev.channels); 1495 INIT_LIST_HEAD(&od->ddev.channels);
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 951b6c79f166..624a11cb07e2 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
47DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); 47DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
48DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); 48DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
49DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); 49DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
50DEFINE_DMI_ATTR_WITH_SHOW(product_sku, 0444, DMI_PRODUCT_SKU);
50DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY); 51DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY);
51DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); 52DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
52DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); 53DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
@@ -193,6 +194,7 @@ static void __init dmi_id_init_attr_table(void)
193 ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); 194 ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
194 ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); 195 ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
195 ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); 196 ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
197 ADD_DMI_ATTR(product_sku, DMI_PRODUCT_SKU);
196 ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); 198 ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
197 ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); 199 ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
198 ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); 200 ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 54e66adef252..f2483548cde9 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -447,6 +447,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
447 dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); 447 dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
448 dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); 448 dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
449 dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); 449 dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
450 dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
450 dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26); 451 dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
451 break; 452 break;
452 case 2: /* Base Board Information */ 453 case 2: /* Base Board Information */
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index caa37a6dd9d4..a90b0b8fc69a 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -64,7 +64,7 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
64 efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID; 64 efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
65 efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; 65 efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
66 efi_status_t status; 66 efi_status_t status;
67 efi_physical_addr_t log_location, log_last_entry; 67 efi_physical_addr_t log_location = 0, log_last_entry = 0;
68 struct linux_efi_tpm_eventlog *log_tbl = NULL; 68 struct linux_efi_tpm_eventlog *log_tbl = NULL;
69 unsigned long first_entry_addr, last_entry_addr; 69 unsigned long first_entry_addr, last_entry_addr;
70 size_t log_size, last_entry_size; 70 size_t log_size, last_entry_size;
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index dd4edd8f22ce..7fa793672a7a 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev,
455 455
456 mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name, 456 mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
457 &altera_cvp_ops, conf); 457 &altera_cvp_ops, conf);
458 if (!mgr) 458 if (!mgr) {
459 return -ENOMEM; 459 ret = -ENOMEM;
460 goto err_unmap;
461 }
460 462
461 pci_set_drvdata(pdev, mgr); 463 pci_set_drvdata(pdev, mgr);
462 464
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a59c07590cee..7dcbac8af9a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -190,6 +190,7 @@ struct amdgpu_job;
190struct amdgpu_irq_src; 190struct amdgpu_irq_src;
191struct amdgpu_fpriv; 191struct amdgpu_fpriv;
192struct amdgpu_bo_va_mapping; 192struct amdgpu_bo_va_mapping;
193struct amdgpu_atif;
193 194
194enum amdgpu_cp_irq { 195enum amdgpu_cp_irq {
195 AMDGPU_CP_IRQ_GFX_EOP = 0, 196 AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch {
1269/* 1270/*
1270 * ACPI 1271 * ACPI
1271 */ 1272 */
1272struct amdgpu_atif_notification_cfg {
1273 bool enabled;
1274 int command_code;
1275};
1276
1277struct amdgpu_atif_notifications {
1278 bool display_switch;
1279 bool expansion_mode_change;
1280 bool thermal_state;
1281 bool forced_power_state;
1282 bool system_power_state;
1283 bool display_conf_change;
1284 bool px_gfx_switch;
1285 bool brightness_change;
1286 bool dgpu_display_event;
1287};
1288
1289struct amdgpu_atif_functions {
1290 bool system_params;
1291 bool sbios_requests;
1292 bool select_active_disp;
1293 bool lid_state;
1294 bool get_tv_standard;
1295 bool set_tv_standard;
1296 bool get_panel_expansion_mode;
1297 bool set_panel_expansion_mode;
1298 bool temperature_change;
1299 bool graphics_device_types;
1300};
1301
1302struct amdgpu_atif {
1303 struct amdgpu_atif_notifications notifications;
1304 struct amdgpu_atif_functions functions;
1305 struct amdgpu_atif_notification_cfg notification_cfg;
1306 struct amdgpu_encoder *encoder_for_bl;
1307};
1308
1309struct amdgpu_atcs_functions { 1273struct amdgpu_atcs_functions {
1310 bool get_ext_state; 1274 bool get_ext_state;
1311 bool pcie_perf_req; 1275 bool pcie_perf_req;
@@ -1466,7 +1430,7 @@ struct amdgpu_device {
1466#if defined(CONFIG_DEBUG_FS) 1430#if defined(CONFIG_DEBUG_FS)
1467 struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; 1431 struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
1468#endif 1432#endif
1469 struct amdgpu_atif atif; 1433 struct amdgpu_atif *atif;
1470 struct amdgpu_atcs atcs; 1434 struct amdgpu_atcs atcs;
1471 struct mutex srbm_mutex; 1435 struct mutex srbm_mutex;
1472 /* GRBM index mutex. Protects concurrent access to GRBM index */ 1436 /* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
1894static inline bool amdgpu_has_atpx(void) { return false; } 1858static inline bool amdgpu_has_atpx(void) { return false; }
1895#endif 1859#endif
1896 1860
1861#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
1862void *amdgpu_atpx_get_dhandle(void);
1863#else
1864static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
1865#endif
1866
1897/* 1867/*
1898 * KMS 1868 * KMS
1899 */ 1869 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 8fa850a070e0..0d8c3fc6eace 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -34,6 +34,45 @@
34#include "amd_acpi.h" 34#include "amd_acpi.h"
35#include "atom.h" 35#include "atom.h"
36 36
37struct amdgpu_atif_notification_cfg {
38 bool enabled;
39 int command_code;
40};
41
42struct amdgpu_atif_notifications {
43 bool display_switch;
44 bool expansion_mode_change;
45 bool thermal_state;
46 bool forced_power_state;
47 bool system_power_state;
48 bool display_conf_change;
49 bool px_gfx_switch;
50 bool brightness_change;
51 bool dgpu_display_event;
52};
53
54struct amdgpu_atif_functions {
55 bool system_params;
56 bool sbios_requests;
57 bool select_active_disp;
58 bool lid_state;
59 bool get_tv_standard;
60 bool set_tv_standard;
61 bool get_panel_expansion_mode;
62 bool set_panel_expansion_mode;
63 bool temperature_change;
64 bool graphics_device_types;
65};
66
67struct amdgpu_atif {
68 acpi_handle handle;
69
70 struct amdgpu_atif_notifications notifications;
71 struct amdgpu_atif_functions functions;
72 struct amdgpu_atif_notification_cfg notification_cfg;
73 struct amdgpu_encoder *encoder_for_bl;
74};
75
37/* Call the ATIF method 76/* Call the ATIF method
38 */ 77 */
39/** 78/**
@@ -46,8 +85,9 @@
46 * Executes the requested ATIF function (all asics). 85 * Executes the requested ATIF function (all asics).
47 * Returns a pointer to the acpi output buffer. 86 * Returns a pointer to the acpi output buffer.
48 */ 87 */
49static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function, 88static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
50 struct acpi_buffer *params) 89 int function,
90 struct acpi_buffer *params)
51{ 91{
52 acpi_status status; 92 acpi_status status;
53 union acpi_object atif_arg_elements[2]; 93 union acpi_object atif_arg_elements[2];
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
70 atif_arg_elements[1].integer.value = 0; 110 atif_arg_elements[1].integer.value = 0;
71 } 111 }
72 112
73 status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); 113 status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
114 &buffer);
74 115
75 /* Fail only if calling the method fails and ATIF is supported */ 116 /* Fail only if calling the method fails and ATIF is supported */
76 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 117 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
141 * (all asics). 182 * (all asics).
142 * returns 0 on success, error on failure. 183 * returns 0 on success, error on failure.
143 */ 184 */
144static int amdgpu_atif_verify_interface(acpi_handle handle, 185static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
145 struct amdgpu_atif *atif)
146{ 186{
147 union acpi_object *info; 187 union acpi_object *info;
148 struct atif_verify_interface output; 188 struct atif_verify_interface output;
149 size_t size; 189 size_t size;
150 int err = 0; 190 int err = 0;
151 191
152 info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL); 192 info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
153 if (!info) 193 if (!info)
154 return -EIO; 194 return -EIO;
155 195
@@ -176,6 +216,35 @@ out:
176 return err; 216 return err;
177} 217}
178 218
219static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
220{
221 acpi_handle handle = NULL;
222 char acpi_method_name[255] = { 0 };
223 struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
224 acpi_status status;
225
226 /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace; on dGPU-only
227 * systems, ATIF is in the dGPU's namespace.
228 */
229 status = acpi_get_handle(dhandle, "ATIF", &handle);
230 if (ACPI_SUCCESS(status))
231 goto out;
232
233 if (amdgpu_has_atpx()) {
234 status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
235 &handle);
236 if (ACPI_SUCCESS(status))
237 goto out;
238 }
239
240 DRM_DEBUG_DRIVER("No ATIF handle found\n");
241 return NULL;
242out:
243 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
244 DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
245 return handle;
246}
247
179/** 248/**
180 * amdgpu_atif_get_notification_params - determine notify configuration 249 * amdgpu_atif_get_notification_params - determine notify configuration
181 * 250 *
@@ -188,15 +257,16 @@ out:
188 * where n is specified in the result if a notifier is used. 257 * where n is specified in the result if a notifier is used.
189 * Returns 0 on success, error on failure. 258 * Returns 0 on success, error on failure.
190 */ 259 */
191static int amdgpu_atif_get_notification_params(acpi_handle handle, 260static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
192 struct amdgpu_atif_notification_cfg *n)
193{ 261{
194 union acpi_object *info; 262 union acpi_object *info;
263 struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
195 struct atif_system_params params; 264 struct atif_system_params params;
196 size_t size; 265 size_t size;
197 int err = 0; 266 int err = 0;
198 267
199 info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL); 268 info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
269 NULL);
200 if (!info) { 270 if (!info) {
201 err = -EIO; 271 err = -EIO;
202 goto out; 272 goto out;
@@ -250,14 +320,15 @@ out:
250 * (all asics). 320 * (all asics).
251 * Returns 0 on success, error on failure. 321 * Returns 0 on success, error on failure.
252 */ 322 */
253static int amdgpu_atif_get_sbios_requests(acpi_handle handle, 323static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
254 struct atif_sbios_requests *req) 324 struct atif_sbios_requests *req)
255{ 325{
256 union acpi_object *info; 326 union acpi_object *info;
257 size_t size; 327 size_t size;
258 int count = 0; 328 int count = 0;
259 329
260 info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL); 330 info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
331 NULL);
261 if (!info) 332 if (!info)
262 return -EIO; 333 return -EIO;
263 334
@@ -290,11 +361,10 @@ out:
290 * Returns NOTIFY code 361 * Returns NOTIFY code
291 */ 362 */
292static int amdgpu_atif_handler(struct amdgpu_device *adev, 363static int amdgpu_atif_handler(struct amdgpu_device *adev,
293 struct acpi_bus_event *event) 364 struct acpi_bus_event *event)
294{ 365{
295 struct amdgpu_atif *atif = &adev->atif; 366 struct amdgpu_atif *atif = adev->atif;
296 struct atif_sbios_requests req; 367 struct atif_sbios_requests req;
297 acpi_handle handle;
298 int count; 368 int count;
299 369
300 DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", 370 DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
303 if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) 373 if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
304 return NOTIFY_DONE; 374 return NOTIFY_DONE;
305 375
306 if (!atif->notification_cfg.enabled || 376 if (!atif ||
377 !atif->notification_cfg.enabled ||
307 event->type != atif->notification_cfg.command_code) 378 event->type != atif->notification_cfg.command_code)
308 /* Not our event */ 379 /* Not our event */
309 return NOTIFY_DONE; 380 return NOTIFY_DONE;
310 381
311 /* Check pending SBIOS requests */ 382 /* Check pending SBIOS requests */
312 handle = ACPI_HANDLE(&adev->pdev->dev); 383 count = amdgpu_atif_get_sbios_requests(atif, &req);
313 count = amdgpu_atif_get_sbios_requests(handle, &req);
314 384
315 if (count <= 0) 385 if (count <= 0)
316 return NOTIFY_DONE; 386 return NOTIFY_DONE;
@@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
641 */ 711 */
642int amdgpu_acpi_init(struct amdgpu_device *adev) 712int amdgpu_acpi_init(struct amdgpu_device *adev)
643{ 713{
644 acpi_handle handle; 714 acpi_handle handle, atif_handle;
645 struct amdgpu_atif *atif = &adev->atif; 715 struct amdgpu_atif *atif;
646 struct amdgpu_atcs *atcs = &adev->atcs; 716 struct amdgpu_atcs *atcs = &adev->atcs;
647 int ret; 717 int ret;
648 718
@@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
658 DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); 728 DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
659 } 729 }
660 730
731 /* Probe for ATIF, and initialize it if found */
732 atif_handle = amdgpu_atif_probe_handle(handle);
733 if (!atif_handle)
734 goto out;
735
736 atif = kzalloc(sizeof(*atif), GFP_KERNEL);
737 if (!atif) {
738 DRM_WARN("Not enough memory to initialize ATIF\n");
739 goto out;
740 }
741 atif->handle = atif_handle;
742
661 /* Call the ATIF method */ 743 /* Call the ATIF method */
662 ret = amdgpu_atif_verify_interface(handle, atif); 744 ret = amdgpu_atif_verify_interface(atif);
663 if (ret) { 745 if (ret) {
664 DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); 746 DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
747 kfree(atif);
665 goto out; 748 goto out;
666 } 749 }
750 adev->atif = atif;
667 751
668 if (atif->notifications.brightness_change) { 752 if (atif->notifications.brightness_change) {
669 struct drm_encoder *tmp; 753 struct drm_encoder *tmp;
@@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
693 } 777 }
694 778
695 if (atif->functions.system_params) { 779 if (atif->functions.system_params) {
696 ret = amdgpu_atif_get_notification_params(handle, 780 ret = amdgpu_atif_get_notification_params(atif);
697 &atif->notification_cfg);
698 if (ret) { 781 if (ret) {
699 DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n", 782 DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
700 ret); 783 ret);
@@ -720,4 +803,6 @@ out:
720void amdgpu_acpi_fini(struct amdgpu_device *adev) 803void amdgpu_acpi_fini(struct amdgpu_device *adev)
721{ 804{
722 unregister_acpi_notifier(&adev->acpi_nb); 805 unregister_acpi_notifier(&adev->acpi_nb);
806 if (adev->atif)
807 kfree(adev->atif);
723} 808}
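The ATIF rework probes for the method in the device's own ACPI namespace first and falls back to the ATPX (iGPU) namespace on PX/HG systems, storing the handle in a now dynamically allocated struct amdgpu_atif. The fallback shape, modelled with stand-in lookups (paths and names fabricated for illustration):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for acpi_get_handle(): look a method up in one namespace. */
    static const char *get_handle(const char *ns, const char *method)
    {
        /* Pretend only the iGPU namespace carries ATIF on this system. */
        if (!strcmp(ns, "iGPU") && !strcmp(method, "ATIF"))
            return "\\_SB.PCI0.GFX0.ATIF";
        return NULL;
    }

    static const char *probe_atif(const char *dgpu_ns, const char *igpu_ns)
    {
        const char *h = get_handle(dgpu_ns, "ATIF");

        if (h)
            return h;    /* dGPU-only systems: ATIF next to the dGPU */
        if (igpu_ns)     /* PX/HG systems: fall back to the iGPU side */
            return get_handle(igpu_ns, "ATIF");
        return NULL;
    }

    int main(void)
    {
        const char *h = probe_atif("dGPU", "iGPU");

        printf("ATIF handle: %s\n", h ? h : "(none)");
        return 0;
    }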
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index daa06e7c5bb7..9ab89371d9e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
90 return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays; 90 return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
91} 91}
92 92
93#if defined(CONFIG_ACPI)
94void *amdgpu_atpx_get_dhandle(void) {
95 return amdgpu_atpx_priv.dhandle;
96}
97#endif
98
93/** 99/**
94 * amdgpu_atpx_call - call an ATPX method 100 * amdgpu_atpx_call - call an ATPX method
95 * 101 *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3317d1536f4f..6e5284e6c028 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
2158 switch (asic_type) { 2158 switch (asic_type) {
2159#if defined(CONFIG_DRM_AMD_DC) 2159#if defined(CONFIG_DRM_AMD_DC)
2160 case CHIP_BONAIRE: 2160 case CHIP_BONAIRE:
2161 case CHIP_HAWAII:
2162 case CHIP_KAVERI: 2161 case CHIP_KAVERI:
2163 case CHIP_KABINI: 2162 case CHIP_KABINI:
2164 case CHIP_MULLINS: 2163 case CHIP_MULLINS:
2164 /*
2165 * We have systems in the wild with these ASICs that require
2166 * LVDS and VGA support which is not supported with DC.
2167 *
 2168 * Fall back to the non-DC driver here by default so as not to
2169 * cause regressions.
2170 */
2171 return amdgpu_dc > 0;
2172 case CHIP_HAWAII:
2165 case CHIP_CARRIZO: 2173 case CHIP_CARRIZO:
2166 case CHIP_STONEY: 2174 case CHIP_STONEY:
2167 case CHIP_POLARIS10: 2175 case CHIP_POLARIS10:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 39ec6b8890a1..e74d620d9699 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
376 struct amdgpu_device *adev = ring->adev; 376 struct amdgpu_device *adev = ring->adev;
377 uint64_t index; 377 uint64_t index;
378 378
379 if (ring != &adev->uvd.inst[ring->me].ring) { 379 if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
380 ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; 380 ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
381 ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); 381 ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
382 } else { 382 } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index f70eeed9ed76..7aaa263ad8c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
231 if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE) 231 if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
232 fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY; 232 fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
233 233
234 /* wrap the last IB with fence */
235 if (job && job->uf_addr) {
236 amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
237 fence_flags | AMDGPU_FENCE_FLAG_64BIT);
238 }
239
234 r = amdgpu_fence_emit(ring, f, fence_flags); 240 r = amdgpu_fence_emit(ring, f, fence_flags);
235 if (r) { 241 if (r) {
236 dev_err(adev->dev, "failed to emit fence (%d)\n", r); 242 dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
243 if (ring->funcs->insert_end) 249 if (ring->funcs->insert_end)
244 ring->funcs->insert_end(ring); 250 ring->funcs->insert_end(ring);
245 251
246 /* wrap the last IB with fence */
247 if (job && job->uf_addr) {
248 amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
249 fence_flags | AMDGPU_FENCE_FLAG_64BIT);
250 }
251
252 if (patch_offset != ~0 && ring->funcs->patch_cond_exec) 252 if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
253 amdgpu_ring_patch_cond_exec(ring, patch_offset); 253 amdgpu_ring_patch_cond_exec(ring, patch_offset);
254 254
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5e4e1bd90383..3526efa8960e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
762 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 762 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
763 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 763 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
764 adev->vram_pin_size += amdgpu_bo_size(bo); 764 adev->vram_pin_size += amdgpu_bo_size(bo);
765 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) 765 adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
766 adev->invisible_pin_size += amdgpu_bo_size(bo);
767 } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { 766 } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
768 adev->gart_pin_size += amdgpu_bo_size(bo); 767 adev->gart_pin_size += amdgpu_bo_size(bo);
769 } 768 }
@@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
790 bo->pin_count--; 789 bo->pin_count--;
791 if (bo->pin_count) 790 if (bo->pin_count)
792 return 0; 791 return 0;
793 for (i = 0; i < bo->placement.num_placement; i++) {
794 bo->placements[i].lpfn = 0;
795 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
796 }
797 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
798 if (unlikely(r)) {
799 dev_err(adev->dev, "%p validate failed for unpin\n", bo);
800 goto error;
801 }
802 792
803 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { 793 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
804 adev->vram_pin_size -= amdgpu_bo_size(bo); 794 adev->vram_pin_size -= amdgpu_bo_size(bo);
805 if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) 795 adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
806 adev->invisible_pin_size -= amdgpu_bo_size(bo);
807 } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { 796 } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
808 adev->gart_pin_size -= amdgpu_bo_size(bo); 797 adev->gart_pin_size -= amdgpu_bo_size(bo);
809 } 798 }
810 799
811error: 800 for (i = 0; i < bo->placement.num_placement; i++) {
801 bo->placements[i].lpfn = 0;
802 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
803 }
804 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
805 if (unlikely(r))
806 dev_err(adev->dev, "%p validate failed for unpin\n", bo);
807
812 return r; 808 return r;
813} 809}
814 810
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b455da487782..fc818b4d849c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1882,7 +1882,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1882 if (!amdgpu_device_has_dc_support(adev)) { 1882 if (!amdgpu_device_has_dc_support(adev)) {
1883 mutex_lock(&adev->pm.mutex); 1883 mutex_lock(&adev->pm.mutex);
1884 amdgpu_dpm_get_active_displays(adev); 1884 amdgpu_dpm_get_active_displays(adev);
1885 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs; 1885 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1886 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 1886 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1887 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 1887 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1888 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ 1888 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e969c879d87e..e5da4654b630 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
73uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); 73uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
74int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); 74int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
75 75
76u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
76uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); 77uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
77uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); 78uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
78 79
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index bcf68f80bbf0..3ff08e326838 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
130 unsigned version_major, version_minor, family_id; 130 unsigned version_major, version_minor, family_id;
131 int i, j, r; 131 int i, j, r;
132 132
133 INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); 133 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
134 134
135 switch (adev->asic_type) { 135 switch (adev->asic_type) {
136#ifdef CONFIG_DRM_AMDGPU_CIK 136#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
314 void *ptr; 314 void *ptr;
315 int i, j; 315 int i, j;
316 316
317 cancel_delayed_work_sync(&adev->uvd.idle_work);
318
317 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { 319 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
318 if (adev->uvd.inst[j].vcpu_bo == NULL) 320 if (adev->uvd.inst[j].vcpu_bo == NULL)
319 continue; 321 continue;
320 322
321 cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
322
323 /* only valid for physical mode */ 323 /* only valid for physical mode */
324 if (adev->asic_type < CHIP_POLARIS10) { 324 if (adev->asic_type < CHIP_POLARIS10) {
325 for (i = 0; i < adev->uvd.max_handles; ++i) 325 for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
1145static void amdgpu_uvd_idle_work_handler(struct work_struct *work) 1145static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1146{ 1146{
1147 struct amdgpu_device *adev = 1147 struct amdgpu_device *adev =
1148 container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); 1148 container_of(work, struct amdgpu_device, uvd.idle_work.work);
1149 unsigned fences = 0, i, j; 1149 unsigned fences = 0, i, j;
1150 1150
1151 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { 1151 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1167 AMD_CG_STATE_GATE); 1167 AMD_CG_STATE_GATE);
1168 } 1168 }
1169 } else { 1169 } else {
1170 schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); 1170 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1171 } 1171 }
1172} 1172}
1173 1173
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
1179 if (amdgpu_sriov_vf(adev)) 1179 if (amdgpu_sriov_vf(adev))
1180 return; 1180 return;
1181 1181
1182 set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); 1182 set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
1183 if (set_clocks) { 1183 if (set_clocks) {
1184 if (adev->pm.dpm_enabled) { 1184 if (adev->pm.dpm_enabled) {
1185 amdgpu_dpm_enable_uvd(adev, true); 1185 amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
1196void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) 1196void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
1197{ 1197{
1198 if (!amdgpu_sriov_vf(ring->adev)) 1198 if (!amdgpu_sriov_vf(ring->adev))
1199 schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); 1199 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1200} 1200}
1201 1201
1202/** 1202/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index b1579fba134c..8b23a1b00c76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
44 void *saved_bo; 44 void *saved_bo;
45 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 45 atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
46 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; 46 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
47 struct delayed_work idle_work;
48 struct amdgpu_ring ring; 47 struct amdgpu_ring ring;
49 struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; 48 struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
50 struct amdgpu_irq_src irq; 49 struct amdgpu_irq_src irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
62 bool address_64_bit; 61 bool address_64_bit;
63 bool use_ctx_buf; 62 bool use_ctx_buf;
64 struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; 63 struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
64 struct delayed_work idle_work;
65}; 65};
66 66
67int amdgpu_uvd_sw_init(struct amdgpu_device *adev); 67int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 127e87b470ff..1b4ad9b2a755 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
52 unsigned long bo_size; 52 unsigned long bo_size;
53 const char *fw_name; 53 const char *fw_name;
54 const struct common_firmware_header *hdr; 54 const struct common_firmware_header *hdr;
55 unsigned version_major, version_minor, family_id; 55 unsigned char fw_check;
56 int r; 56 int r;
57 57
58 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); 58 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -83,12 +83,33 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
83 83
84 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; 84 hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
85 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); 85 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
86 family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
87 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
88 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
89 DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
90 version_major, version_minor, family_id);
91 86
 87 /* Bits 20-23 hold the encode major version and are non-zero in the new
 88 * naming convention. This field is part of version minor and DRM_DISABLED_FLAG
 89 * in the old naming convention. Since the latest version minor is 0x5B and
 90 * DRM_DISABLED_FLAG is zero in the old naming convention, this field is
 91 * always zero so far. These four bits tell which naming convention is in use.
92 */
93 fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
94 if (fw_check) {
95 unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
96
97 fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
98 enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
99 enc_major = fw_check;
100 dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
101 vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
102 DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
103 enc_major, enc_minor, dec_ver, vep, fw_rev);
104 } else {
105 unsigned int version_major, version_minor, family_id;
106
107 family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
108 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
109 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
110 DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
111 version_major, version_minor, family_id);
112 }
92 113
93 bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) 114 bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
94 + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE 115 + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
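The new scheme packs five fields into the 32-bit ucode_version word: revision in bits 0-11, encode minor in 12-19, encode major in 20-23 (the discriminator, always zero under the old scheme), decode version in 24-27 and VEP in 28-31. A standalone decoder for both layouts; the sample words are fabricated:

    #include <stdint.h>
    #include <stdio.h>

    static void decode_vcn_version(uint32_t ver)
    {
        unsigned int fw_check = (ver >> 20) & 0xf;

        if (fw_check) {    /* new naming convention */
            unsigned int fw_rev    = ver & 0xfff;
            unsigned int enc_minor = (ver >> 12) & 0xff;
            unsigned int enc_major = fw_check;
            unsigned int dec_ver   = (ver >> 24) & 0xf;
            unsigned int vep       = (ver >> 28) & 0xf;

            printf("ENC %u.%u DEC %u VEP %u rev %u\n",
                   enc_major, enc_minor, dec_ver, vep, fw_rev);
        } else {           /* old naming convention */
            unsigned int family_id = ver & 0xff;
            unsigned int major     = (ver >> 24) & 0xff;
            unsigned int minor     = (ver >> 8) & 0xff;

            printf("version %u.%u family %u\n", major, minor, family_id);
        }
    }

    int main(void)
    {
        decode_vcn_version(0x1110002a);    /* fabricated new-style word */
        decode_vcn_version(0x01005b12);    /* fabricated old-style word */
        return 0;
    }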
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b0eb2f537392..edf16b2b957a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1463,7 +1463,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1463 uint64_t count; 1463 uint64_t count;
1464 1464
1465 max_entries = min(max_entries, 16ull * 1024ull); 1465 max_entries = min(max_entries, 16ull * 1024ull);
1466 for (count = 1; count < max_entries; ++count) { 1466 for (count = 1;
1467 count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1468 ++count) {
1467 uint64_t idx = pfn + count; 1469 uint64_t idx = pfn + count;
1468 1470
1469 if (pages_addr[idx] != 1471 if (pages_addr[idx] !=
@@ -1476,7 +1478,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1476 dma_addr = pages_addr; 1478 dma_addr = pages_addr;
1477 } else { 1479 } else {
1478 addr = pages_addr[pfn]; 1480 addr = pages_addr[pfn];
1479 max_entries = count; 1481 max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1480 } 1482 }
1481 1483
1482 } else if (flags & AMDGPU_PTE_VALID) { 1484 } else if (flags & AMDGPU_PTE_VALID) {
@@ -1491,7 +1493,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1491 if (r) 1493 if (r)
1492 return r; 1494 return r;
1493 1495
1494 pfn += last - start + 1; 1496 pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
1495 if (nodes && nodes->size == pfn) { 1497 if (nodes && nodes->size == pfn) {
1496 pfn = 0; 1498 pfn = 0;
1497 ++nodes; 1499 ++nodes;
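These hunks make the split-mapping loop page-size aware: pages_addr[] is indexed in CPU pages while max_entries and pfn count GPU pages of AMDGPU_GPU_PAGE_SIZE (4 KiB), so on a kernel with 64 KiB pages each CPU page backs 16 GPU PTEs and every count crossing the boundary must be scaled by PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE. The arithmetic, worked through for that configuration:

    #include <stdio.h>

    #define PAGE_SIZE            65536UL  /* e.g. a 64 KiB-page arm64 kernel */
    #define AMDGPU_GPU_PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long ratio = PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE;  /* 16 */
        unsigned long max_entries = 16UL * 1024UL;  /* GPU PTEs per batch */
        unsigned long contiguous_cpu_pages = 4;

        /* The contiguity scan over pages_addr[] may look at most this
         * many CPU-page entries: */
        printf("CPU pages scanned: %lu\n", max_entries / ratio);

        /* ...and N contiguous CPU pages back this many GPU PTEs: */
        printf("GPU PTEs mapped:   %lu\n", contiguous_cpu_pages * ratio);

        /* After mapping last - start + 1 GPU pages, the CPU-page cursor
         * advances by that count divided back down: */
        printf("pfn advance:       %lu\n",
               (contiguous_cpu_pages * ratio) / ratio);
        return 0;
    }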
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 9aca653bec07..b6333f92ba45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,6 +97,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
97} 97}
98 98
99/** 99/**
100 * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
101 *
102 * @bo: &amdgpu_bo buffer object (must be in VRAM)
103 *
104 * Returns:
105 * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
106 */
107u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
108{
109 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
110 struct ttm_mem_reg *mem = &bo->tbo.mem;
111 struct drm_mm_node *nodes = mem->mm_node;
112 unsigned pages = mem->num_pages;
113 u64 usage = 0;
114
115 if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
116 return 0;
117
118 if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
119 return amdgpu_bo_size(bo);
120
121 while (nodes && pages) {
122 usage += nodes->size << PAGE_SHIFT;
123 usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
124 pages -= nodes->size;
125 ++nodes;
126 }
127
128 return usage;
129}
130
131/**
100 * amdgpu_vram_mgr_new - allocate new ranges 132 * amdgpu_vram_mgr_new - allocate new ranges
101 * 133 *
102 * @man: TTM memory type manager 134 * @man: TTM memory type manager
@@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
135 num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); 167 num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
136 } 168 }
137 169
138 nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL); 170 nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
171 GFP_KERNEL | __GFP_ZERO);
139 if (!nodes) 172 if (!nodes)
140 return -ENOMEM; 173 return -ENOMEM;
141 174
@@ -190,7 +223,7 @@ error:
190 drm_mm_remove_node(&nodes[i]); 223 drm_mm_remove_node(&nodes[i]);
191 spin_unlock(&mgr->lock); 224 spin_unlock(&mgr->lock);
192 225
193 kfree(nodes); 226 kvfree(nodes);
194 return r == -ENOSPC ? 0 : r; 227 return r == -ENOSPC ? 0 : r;
195} 228}
196 229
@@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
229 atomic64_sub(usage, &mgr->usage); 262 atomic64_sub(usage, &mgr->usage);
230 atomic64_sub(vis_usage, &mgr->vis_usage); 263 atomic64_sub(vis_usage, &mgr->vis_usage);
231 264
232 kfree(mem->mm_node); 265 kvfree(mem->mm_node);
233 mem->mm_node = NULL; 266 mem->mm_node = NULL;
234} 267}
235 268
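The allocator change above matters because num_nodes can be very large for big BOs with small pages_per_node; kvmalloc_array() falls back to vmalloc when the kmalloc path cannot satisfy the request, and every matching free site must then use kvfree(), which is why both kfree() calls in this file change in the same patch. A minimal kernel-module sketch of the pairing rule (module and struct names are hypothetical):

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>      /* kvmalloc_array(), kvfree() */

struct demo_node { u64 start, size; };

static struct demo_node *nodes;

static int __init demo_init(void)
{
	/* may be backed by kmalloc or vmalloc depending on the size */
	nodes = kvmalloc_array(1 << 16, sizeof(*nodes),
			       GFP_KERNEL | __GFP_ZERO);
	if (!nodes)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	kvfree(nodes);   /* handles both backings; plain kfree() would not */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");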
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 0999c843f623..a71b97519cc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
900 .emit_frame_size = 900 .emit_frame_size =
901 4 + /* vce_v3_0_emit_pipeline_sync */ 901 4 + /* vce_v3_0_emit_pipeline_sync */
902 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ 902 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
903 .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ 903 .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
904 .emit_ib = amdgpu_vce_ring_emit_ib, 904 .emit_ib = amdgpu_vce_ring_emit_ib,
905 .emit_fence = amdgpu_vce_ring_emit_fence, 905 .emit_fence = amdgpu_vce_ring_emit_fence,
906 .test_ring = amdgpu_vce_ring_test_ring, 906 .test_ring = amdgpu_vce_ring_test_ring,
@@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
924 6 + /* vce_v3_0_emit_vm_flush */ 924 6 + /* vce_v3_0_emit_vm_flush */
925 4 + /* vce_v3_0_emit_pipeline_sync */ 925 4 + /* vce_v3_0_emit_pipeline_sync */
926 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ 926 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
927 .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ 927 .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
928 .emit_ib = vce_v3_0_ring_emit_ib, 928 .emit_ib = vce_v3_0_ring_emit_ib,
929 .emit_vm_flush = vce_v3_0_emit_vm_flush, 929 .emit_vm_flush = vce_v3_0_emit_vm_flush,
930 .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, 930 .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f9add85157e7..770c6b24be0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2175,6 +2175,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2175 return color_space; 2175 return color_space;
2176} 2176}
2177 2177
2178static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
2179{
2180 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2181 return;
2182
2183 timing_out->display_color_depth--;
2184}
2185
2186static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
2187 const struct drm_display_info *info)
2188{
2189 int normalized_clk;
2190 if (timing_out->display_color_depth <= COLOR_DEPTH_888)
2191 return;
2192 do {
2193 normalized_clk = timing_out->pix_clk_khz;
2194 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
2195 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
2196 normalized_clk /= 2;
 2197 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
2198 switch (timing_out->display_color_depth) {
2199 case COLOR_DEPTH_101010:
2200 normalized_clk = (normalized_clk * 30) / 24;
2201 break;
2202 case COLOR_DEPTH_121212:
2203 normalized_clk = (normalized_clk * 36) / 24;
2204 break;
2205 case COLOR_DEPTH_161616:
2206 normalized_clk = (normalized_clk * 48) / 24;
2207 break;
2208 default:
2209 return;
2210 }
2211 if (normalized_clk <= info->max_tmds_clock)
2212 return;
2213 reduce_mode_colour_depth(timing_out);
2214
2215 } while (timing_out->display_color_depth > COLOR_DEPTH_888);
2216
2217}
2178/*****************************************************************************/ 2218/*****************************************************************************/
2179 2219
2180static void 2220static void
@@ -2183,6 +2223,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2183 const struct drm_connector *connector) 2223 const struct drm_connector *connector)
2184{ 2224{
2185 struct dc_crtc_timing *timing_out = &stream->timing; 2225 struct dc_crtc_timing *timing_out = &stream->timing;
2226 const struct drm_display_info *info = &connector->display_info;
2186 2227
2187 memset(timing_out, 0, sizeof(struct dc_crtc_timing)); 2228 memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2188 2229
@@ -2191,8 +2232,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2191 timing_out->v_border_top = 0; 2232 timing_out->v_border_top = 0;
2192 timing_out->v_border_bottom = 0; 2233 timing_out->v_border_bottom = 0;
2193 /* TODO: un-hardcode */ 2234 /* TODO: un-hardcode */
2194 2235 if (drm_mode_is_420_only(info, mode_in)
2195 if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) 2236 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2237 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
2238 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2196 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) 2239 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2197 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 2240 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2198 else 2241 else
@@ -2228,6 +2271,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2228 2271
2229 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 2272 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
2230 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 2273 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
2274 if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2275 adjust_colour_depth_from_display_info(timing_out, info);
2231} 2276}
2232 2277
2233static void fill_audio_info(struct audio_info *audio_info, 2278static void fill_audio_info(struct audio_info *audio_info,
@@ -3928,10 +3973,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3928 if (acrtc->base.state->event) 3973 if (acrtc->base.state->event)
3929 prepare_flip_isr(acrtc); 3974 prepare_flip_isr(acrtc);
3930 3975
3976 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3977
3931 surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0]; 3978 surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
3932 surface_updates->flip_addr = &addr; 3979 surface_updates->flip_addr = &addr;
3933 3980
3934
3935 dc_commit_updates_for_stream(adev->dm.dc, 3981 dc_commit_updates_for_stream(adev->dm.dc,
3936 surface_updates, 3982 surface_updates,
3937 1, 3983 1,
@@ -3944,9 +3990,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
3944 __func__, 3990 __func__,
3945 addr.address.grph.addr.high_part, 3991 addr.address.grph.addr.high_part,
3946 addr.address.grph.addr.low_part); 3992 addr.address.grph.addr.low_part);
3947
3948
3949 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
3950} 3993}
3951 3994
3952/* 3995/*
@@ -4206,6 +4249,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4206 struct drm_connector *connector; 4249 struct drm_connector *connector;
4207 struct drm_connector_state *old_con_state, *new_con_state; 4250 struct drm_connector_state *old_con_state, *new_con_state;
4208 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 4251 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4252 int crtc_disable_count = 0;
4209 4253
4210 drm_atomic_helper_update_legacy_modeset_state(dev, state); 4254 drm_atomic_helper_update_legacy_modeset_state(dev, state);
4211 4255
@@ -4410,6 +4454,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4410 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 4454 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4411 bool modeset_needed; 4455 bool modeset_needed;
4412 4456
4457 if (old_crtc_state->active && !new_crtc_state->active)
4458 crtc_disable_count++;
4459
4413 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 4460 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4414 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 4461 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4415 modeset_needed = modeset_required( 4462 modeset_needed = modeset_required(
@@ -4463,11 +4510,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4463 * so we can put the GPU into runtime suspend if we're not driving any 4510 * so we can put the GPU into runtime suspend if we're not driving any
4464 * displays anymore 4511 * displays anymore
4465 */ 4512 */
4513 for (i = 0; i < crtc_disable_count; i++)
4514 pm_runtime_put_autosuspend(dev->dev);
4466 pm_runtime_mark_last_busy(dev->dev); 4515 pm_runtime_mark_last_busy(dev->dev);
4467 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4468 if (old_crtc_state->active && !new_crtc_state->active)
4469 pm_runtime_put_autosuspend(dev->dev);
4470 }
4471} 4516}
4472 4517
4473 4518
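The adjust_colour_depth_from_display_info() helper added above scales the pixel clock by bits-per-component relative to 24 bpp (30/24 for 10-bit, 36/24 for 12-bit, 48/24 for 16-bit), halves it first for YCbCr 4:2:0, and steps the depth down until the result fits the sink's max_tmds_clock. A user-space sketch of the same loop with hypothetical mode and sink numbers:

#include <stdio.h>

int main(void)
{
	static const int depths[] = { 16, 12, 10, 8 };  /* bpc, high to low */
	int pix_clk_khz = 297000;    /* hypothetical mode clock, kHz */
	int max_tmds_khz = 340000;   /* hypothetical sink TMDS limit, kHz */
	int i;

	for (i = 0; i < 3; i++) {
		/* clock scales with bits per pixel relative to 24 bpp */
		int normalized = pix_clk_khz * (depths[i] * 3) / 24;

		if (normalized <= max_tmds_khz)
			break;           /* this depth fits the link */
	}
	printf("chosen depth: %d bpc\n", depths[i]);
	return 0;
}

Here 16, 12, and 10 bpc all push the normalized clock past 340 MHz, so the loop lands on 8 bpc — the same outcome the driver reaches by decrementing display_color_depth toward COLOR_DEPTH_888.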
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 092d800b703a..33b4de4ad66e 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1
1433 uint8_t acggfxclkspreadpercent; 1433 uint8_t acggfxclkspreadpercent;
1434 uint16_t acggfxclkspreadfreq; 1434 uint16_t acggfxclkspreadfreq;
1435 1435
1436 uint32_t boardreserved[10]; 1436 uint8_t Vr2_I2C_address;
1437 uint8_t padding_vr2[3];
1438
1439 uint32_t boardreserved[9];
1437}; 1440};
1438 1441
1439/* 1442/*
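Note how the patch above carves Vr2_I2C_address out of the reserved area without changing the table's ABI: one u8 plus three bytes of padding replaces exactly one of the ten reserved u32 words, so sizeof() and all following offsets stay the same. A compile-time check of that invariant (struct names shortened, C11):

#include <stdint.h>
#include <assert.h>

struct layout_old {
	uint32_t boardreserved[10];
};

struct layout_new {
	uint8_t  Vr2_I2C_address;
	uint8_t  padding_vr2[3];   /* keeps 4-byte alignment */
	uint32_t boardreserved[9];
};

/* the carve-out must not change the table size seen by the firmware */
static_assert(sizeof(struct layout_new) == sizeof(struct layout_old),
	      "reserved-field carve-out changed the ABI");

int main(void) { return 0; }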
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 5325661fedff..d27c1c9df286 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
512 return 0; 512 return 0;
513} 513}
514 514
515static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
516 struct pp_atomfwctrl_bios_boot_up_values *boot_values,
517 struct atom_firmware_info_v3_2 *fw_info)
518{
519 uint32_t frequency = 0;
520
521 boot_values->ulRevision = fw_info->firmware_revision;
522 boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
523 boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
524 boot_values->usVddc = fw_info->bootup_vddc_mv;
525 boot_values->usVddci = fw_info->bootup_vddci_mv;
526 boot_values->usMvddc = fw_info->bootup_mvddc_mv;
527 boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
528 boot_values->ucCoolingID = fw_info->coolingsolution_id;
529 boot_values->ulSocClk = 0;
530 boot_values->ulDCEFClk = 0;
531
532 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
533 boot_values->ulSocClk = frequency;
534
535 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
536 boot_values->ulDCEFClk = frequency;
537
538 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
539 boot_values->ulEClk = frequency;
540
541 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
542 boot_values->ulVClk = frequency;
543
544 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
545 boot_values->ulDClk = frequency;
546}
547
548static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
549 struct pp_atomfwctrl_bios_boot_up_values *boot_values,
550 struct atom_firmware_info_v3_1 *fw_info)
551{
552 uint32_t frequency = 0;
553
554 boot_values->ulRevision = fw_info->firmware_revision;
555 boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
556 boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
557 boot_values->usVddc = fw_info->bootup_vddc_mv;
558 boot_values->usVddci = fw_info->bootup_vddci_mv;
559 boot_values->usMvddc = fw_info->bootup_mvddc_mv;
560 boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
561 boot_values->ucCoolingID = fw_info->coolingsolution_id;
562 boot_values->ulSocClk = 0;
563 boot_values->ulDCEFClk = 0;
564
565 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
566 boot_values->ulSocClk = frequency;
567
568 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
569 boot_values->ulDCEFClk = frequency;
570
571 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
572 boot_values->ulEClk = frequency;
573
574 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
575 boot_values->ulVClk = frequency;
576
577 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
578 boot_values->ulDClk = frequency;
579}
580
515int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, 581int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
516 struct pp_atomfwctrl_bios_boot_up_values *boot_values) 582 struct pp_atomfwctrl_bios_boot_up_values *boot_values)
517{ 583{
518 struct atom_firmware_info_v3_1 *info = NULL; 584 struct atom_firmware_info_v3_2 *fwinfo_3_2;
585 struct atom_firmware_info_v3_1 *fwinfo_3_1;
586 struct atom_common_table_header *info = NULL;
519 uint16_t ix; 587 uint16_t ix;
520 588
521 ix = GetIndexIntoMasterDataTable(firmwareinfo); 589 ix = GetIndexIntoMasterDataTable(firmwareinfo);
522 info = (struct atom_firmware_info_v3_1 *) 590 info = (struct atom_common_table_header *)
523 smu_atom_get_data_table(hwmgr->adev, 591 smu_atom_get_data_table(hwmgr->adev,
524 ix, NULL, NULL, NULL); 592 ix, NULL, NULL, NULL);
525 593
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
528 return -EINVAL; 596 return -EINVAL;
529 } 597 }
530 598
531 boot_values->ulRevision = info->firmware_revision; 599 if ((info->format_revision == 3) && (info->content_revision == 2)) {
532 boot_values->ulGfxClk = info->bootup_sclk_in10khz; 600 fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
533 boot_values->ulUClk = info->bootup_mclk_in10khz; 601 pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
534 boot_values->usVddc = info->bootup_vddc_mv; 602 boot_values, fwinfo_3_2);
535 boot_values->usVddci = info->bootup_vddci_mv; 603 } else if ((info->format_revision == 3) && (info->content_revision == 1)) {
536 boot_values->usMvddc = info->bootup_mvddc_mv; 604 fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
537 boot_values->usVddGfx = info->bootup_vddgfx_mv; 605 pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
538 boot_values->ucCoolingID = info->coolingsolution_id; 606 boot_values, fwinfo_3_1);
539 boot_values->ulSocClk = 0; 607 } else {
540 boot_values->ulDCEFClk = 0; 608 pr_info("Fw info table revision does not match!");
609 return -EINVAL;
610 }
541 611
542 return 0; 612 return 0;
543} 613}
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
629 param->acggfxclkspreadpercent = info->acggfxclkspreadpercent; 699 param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
630 param->acggfxclkspreadfreq = info->acggfxclkspreadfreq; 700 param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
631 701
702 param->Vr2_I2C_address = info->Vr2_I2C_address;
703
632 return 0; 704 return 0;
633} 705}
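The rework above stops assuming the firmwareinfo table is v3.1: it reads the table through atom_common_table_header first, dispatches on format_revision/content_revision, and returns -EINVAL for anything unknown instead of misreading fields. A minimal user-space sketch of that header-first dispatch (struct layouts abbreviated and hypothetical):

#include <stdio.h>
#include <stdint.h>

struct common_header {
	uint16_t structuresize;
	uint8_t  format_revision;
	uint8_t  content_revision;
};

struct fw_info_v3_1 { struct common_header hdr; uint32_t bootup_sclk; };
struct fw_info_v3_2 { struct common_header hdr; uint32_t bootup_sclk; uint32_t extra; };

static int parse(const struct common_header *hdr)
{
	if (hdr->format_revision == 3 && hdr->content_revision == 2) {
		const struct fw_info_v3_2 *fw = (const void *)hdr;
		printf("v3.2: sclk=%u extra=%u\n", fw->bootup_sclk, fw->extra);
	} else if (hdr->format_revision == 3 && hdr->content_revision == 1) {
		const struct fw_info_v3_1 *fw = (const void *)hdr;
		printf("v3.1: sclk=%u\n", fw->bootup_sclk);
	} else {
		return -1;   /* unknown revision: refuse rather than misparse */
	}
	return 0;
}

int main(void)
{
	struct fw_info_v3_2 t = { { (uint16_t)sizeof(t), 3, 2 }, 115000, 42 };
	return parse(&t.hdr);
}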
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe10aa4db5e6..22e21668c93a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
136 uint32_t ulUClk; 136 uint32_t ulUClk;
137 uint32_t ulSocClk; 137 uint32_t ulSocClk;
138 uint32_t ulDCEFClk; 138 uint32_t ulDCEFClk;
139 uint32_t ulEClk;
140 uint32_t ulVClk;
141 uint32_t ulDClk;
139 uint16_t usVddc; 142 uint16_t usVddc;
140 uint16_t usVddci; 143 uint16_t usVddci;
141 uint16_t usMvddc; 144 uint16_t usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
207 uint8_t acggfxclkspreadenabled; 210 uint8_t acggfxclkspreadenabled;
208 uint8_t acggfxclkspreadpercent; 211 uint8_t acggfxclkspreadpercent;
209 uint16_t acggfxclkspreadfreq; 212 uint16_t acggfxclkspreadfreq;
213
214 uint8_t Vr2_I2C_address;
210}; 215};
211 216
212int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, 217int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index dbe4b1f66784..22364875a943 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
1090static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) 1090static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
1091{ 1091{
1092 struct amdgpu_device *adev = hwmgr->adev; 1092 struct amdgpu_device *adev = hwmgr->adev;
1093 int result; 1093 int result = 0;
1094 uint32_t num_se = 0; 1094 uint32_t num_se = 0;
1095 uint32_t count, data; 1095 uint32_t count, data;
1096 1096
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 782e2098824d..c98e5de777cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
81 81
82 data->registry_data.disallowed_features = 0x0; 82 data->registry_data.disallowed_features = 0x0;
83 data->registry_data.od_state_in_dc_support = 0; 83 data->registry_data.od_state_in_dc_support = 0;
84 data->registry_data.thermal_support = 1;
84 data->registry_data.skip_baco_hardware = 0; 85 data->registry_data.skip_baco_hardware = 0;
85 86
86 data->registry_data.log_avfs_param = 0; 87 data->registry_data.log_avfs_param = 0;
@@ -803,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
803 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; 804 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
804 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; 805 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
805 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; 806 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
807 data->vbios_boot_state.eclock = boot_up_values.ulEClk;
808 data->vbios_boot_state.dclock = boot_up_values.ulDClk;
809 data->vbios_boot_state.vclock = boot_up_values.ulVClk;
806 smum_send_msg_to_smc_with_parameter(hwmgr, 810 smum_send_msg_to_smc_with_parameter(hwmgr,
807 PPSMC_MSG_SetMinDeepSleepDcefclk, 811 PPSMC_MSG_SetMinDeepSleepDcefclk,
808 (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); 812 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index e81ded1ec198..49b38df8c7f2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
167 uint32_t mem_clock; 167 uint32_t mem_clock;
168 uint32_t soc_clock; 168 uint32_t soc_clock;
169 uint32_t dcef_clock; 169 uint32_t dcef_clock;
170 uint32_t eclock;
171 uint32_t dclock;
172 uint32_t vclock;
170}; 173};
171 174
172#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 175#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 888ddca902d8..29914700ee82 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
230 ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF; 230 ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
231 } 231 }
232 232
233 ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
234
233 return 0; 235 return 0;
234} 236}
235 237
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index 2f8a3b983cce..b08526fd1619 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -499,7 +499,10 @@ typedef struct {
499 uint8_t AcgGfxclkSpreadPercent; 499 uint8_t AcgGfxclkSpreadPercent;
500 uint16_t AcgGfxclkSpreadFreq; 500 uint16_t AcgGfxclkSpreadFreq;
501 501
502 uint32_t BoardReserved[10]; 502 uint8_t Vr2_I2C_address;
503 uint8_t padding_vr2[3];
504
505 uint32_t BoardReserved[9];
503 506
504 507
505 uint32_t MmHubPadding[7]; 508 uint32_t MmHubPadding[7];
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 8d20faa198cf..0a788d76ed5f 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -278,7 +278,6 @@ static int malidp_init(struct drm_device *drm)
278 278
279static void malidp_fini(struct drm_device *drm) 279static void malidp_fini(struct drm_device *drm)
280{ 280{
281 drm_atomic_helper_shutdown(drm);
282 drm_mode_config_cleanup(drm); 281 drm_mode_config_cleanup(drm);
283} 282}
284 283
@@ -646,6 +645,7 @@ vblank_fail:
646 malidp_de_irq_fini(drm); 645 malidp_de_irq_fini(drm);
647 drm->irq_enabled = false; 646 drm->irq_enabled = false;
648irq_init_fail: 647irq_init_fail:
648 drm_atomic_helper_shutdown(drm);
649 component_unbind_all(dev, drm); 649 component_unbind_all(dev, drm);
650bind_fail: 650bind_fail:
651 of_node_put(malidp->crtc.port); 651 of_node_put(malidp->crtc.port);
@@ -681,6 +681,7 @@ static void malidp_unbind(struct device *dev)
681 malidp_se_irq_fini(drm); 681 malidp_se_irq_fini(drm);
682 malidp_de_irq_fini(drm); 682 malidp_de_irq_fini(drm);
683 drm->irq_enabled = false; 683 drm->irq_enabled = false;
684 drm_atomic_helper_shutdown(drm);
684 component_unbind_all(dev, drm); 685 component_unbind_all(dev, drm);
685 of_node_put(malidp->crtc.port); 686 of_node_put(malidp->crtc.port);
686 malidp->crtc.port = NULL; 687 malidp->crtc.port = NULL;
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index d789b46dc817..069783e715f1 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -634,7 +634,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
634 .vsync_irq = MALIDP500_DE_IRQ_VSYNC, 634 .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
635 }, 635 },
636 .se_irq_map = { 636 .se_irq_map = {
637 .irq_mask = MALIDP500_SE_IRQ_CONF_MODE, 637 .irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
638 MALIDP500_SE_IRQ_GLOBAL,
638 .vsync_irq = 0, 639 .vsync_irq = 0,
639 }, 640 },
640 .dc_irq_map = { 641 .dc_irq_map = {
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 7a44897c50fe..29409a65d864 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -23,6 +23,7 @@
23 23
24/* Layer specific register offsets */ 24/* Layer specific register offsets */
25#define MALIDP_LAYER_FORMAT 0x000 25#define MALIDP_LAYER_FORMAT 0x000
26#define LAYER_FORMAT_MASK 0x3f
26#define MALIDP_LAYER_CONTROL 0x004 27#define MALIDP_LAYER_CONTROL 0x004
27#define LAYER_ENABLE (1 << 0) 28#define LAYER_ENABLE (1 << 0)
28#define LAYER_FLOWCFG_MASK 7 29#define LAYER_FLOWCFG_MASK 7
@@ -235,8 +236,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
235 if (state->rotation & MALIDP_ROTATED_MASK) { 236 if (state->rotation & MALIDP_ROTATED_MASK) {
236 int val; 237 int val;
237 238
238 val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h, 239 val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
239 state->crtc_w, 240 state->crtc_h,
240 fb->format->format); 241 fb->format->format);
241 if (val < 0) 242 if (val < 0)
242 return val; 243 return val;
@@ -337,7 +338,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
337 dest_w = plane->state->crtc_w; 338 dest_w = plane->state->crtc_w;
338 dest_h = plane->state->crtc_h; 339 dest_h = plane->state->crtc_h;
339 340
340 malidp_hw_write(mp->hwdev, ms->format, mp->layer->base); 341 val = malidp_hw_read(mp->hwdev, mp->layer->base);
342 val = (val & ~LAYER_FORMAT_MASK) | ms->format;
343 malidp_hw_write(mp->hwdev, val, mp->layer->base);
341 344
342 for (i = 0; i < ms->n_planes; i++) { 345 for (i = 0; i < ms->n_planes; i++) {
343 /* calculate the offset for the layer's plane registers */ 346 /* calculate the offset for the layer's plane registers */
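The malidp_planes fix above replaces a blind write to the layer format register with read-modify-write, so bits outside LAYER_FORMAT_MASK keep whatever state the hardware or firmware left there. A user-space sketch of the masked update (the register is modelled as a plain variable):

#include <stdio.h>
#include <stdint.h>

#define LAYER_FORMAT_MASK 0x3f

static uint32_t layer_reg = 0xdeadbe15;   /* upper bits: unrelated state */

int main(void)
{
	uint32_t format = 0x2a;               /* hypothetical new pixel format */

	/* read-modify-write: touch only the format field */
	uint32_t val = layer_reg;
	val = (val & ~(uint32_t)LAYER_FORMAT_MASK) | (format & LAYER_FORMAT_MASK);
	layer_reg = val;

	printf("reg=0x%08x\n", layer_reg);    /* 0xdeadbe2a: upper bits intact */
	return 0;
}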
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 73c875db45f4..47e0992f3908 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -839,7 +839,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
839 return ret; 839 return ret;
840 } 840 }
841 841
842 if (desc->layout.xstride && desc->layout.pstride) { 842 if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
843 int ret; 843 int ret;
844 844
845 ret = drm_plane_create_rotation_property(&plane->base, 845 ret = drm_plane_create_rotation_property(&plane->base,
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 7ab36042a822..a6e8f4591e63 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -14,6 +14,7 @@
14#include <drm/bridge/mhl.h> 14#include <drm/bridge/mhl.h>
15#include <drm/drm_crtc.h> 15#include <drm/drm_crtc.h>
16#include <drm/drm_edid.h> 16#include <drm/drm_edid.h>
17#include <drm/drm_encoder.h>
17 18
18#include <linux/clk.h> 19#include <linux/clk.h>
19#include <linux/delay.h> 20#include <linux/delay.h>
@@ -36,8 +37,11 @@
36 37
37#define SII8620_BURST_BUF_LEN 288 38#define SII8620_BURST_BUF_LEN 288
38#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) 39#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
39#define MHL1_MAX_LCLK 225000 40
40#define MHL3_MAX_LCLK 600000 41#define MHL1_MAX_PCLK 75000
42#define MHL1_MAX_PCLK_PP_MODE 150000
43#define MHL3_MAX_PCLK 200000
44#define MHL3_MAX_PCLK_PP_MODE 300000
41 45
42enum sii8620_mode { 46enum sii8620_mode {
43 CM_DISCONNECTED, 47 CM_DISCONNECTED,
@@ -69,9 +73,7 @@ struct sii8620 {
69 struct regulator_bulk_data supplies[2]; 73 struct regulator_bulk_data supplies[2];
70 struct mutex lock; /* context lock, protects fields below */ 74 struct mutex lock; /* context lock, protects fields below */
71 int error; 75 int error;
72 int pixel_clock;
73 unsigned int use_packed_pixel:1; 76 unsigned int use_packed_pixel:1;
74 int video_code;
75 enum sii8620_mode mode; 77 enum sii8620_mode mode;
76 enum sii8620_sink_type sink_type; 78 enum sii8620_sink_type sink_type;
77 u8 cbus_status; 79 u8 cbus_status;
@@ -79,7 +81,9 @@ struct sii8620 {
79 u8 xstat[MHL_XDS_SIZE]; 81 u8 xstat[MHL_XDS_SIZE];
80 u8 devcap[MHL_DCAP_SIZE]; 82 u8 devcap[MHL_DCAP_SIZE];
81 u8 xdevcap[MHL_XDC_SIZE]; 83 u8 xdevcap[MHL_XDC_SIZE];
82 u8 avif[HDMI_INFOFRAME_SIZE(AVI)]; 84 bool feature_complete;
85 bool devcap_read;
86 bool sink_detected;
83 struct edid *edid; 87 struct edid *edid;
84 unsigned int gen2_write_burst:1; 88 unsigned int gen2_write_burst:1;
85 enum sii8620_mt_state mt_state; 89 enum sii8620_mt_state mt_state;
@@ -476,7 +480,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
476 } 480 }
477} 481}
478 482
479static void sii8620_sink_detected(struct sii8620 *ctx, int ret) 483static void sii8620_identify_sink(struct sii8620 *ctx)
480{ 484{
481 static const char * const sink_str[] = { 485 static const char * const sink_str[] = {
482 [SINK_NONE] = "NONE", 486 [SINK_NONE] = "NONE",
@@ -487,7 +491,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
487 char sink_name[20]; 491 char sink_name[20];
488 struct device *dev = ctx->dev; 492 struct device *dev = ctx->dev;
489 493
490 if (ret < 0) 494 if (!ctx->sink_detected || !ctx->devcap_read)
491 return; 495 return;
492 496
493 sii8620_fetch_edid(ctx); 497 sii8620_fetch_edid(ctx);
@@ -496,6 +500,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
496 sii8620_mhl_disconnected(ctx); 500 sii8620_mhl_disconnected(ctx);
497 return; 501 return;
498 } 502 }
503 sii8620_set_upstream_edid(ctx);
499 504
500 if (drm_detect_hdmi_monitor(ctx->edid)) 505 if (drm_detect_hdmi_monitor(ctx->edid))
501 ctx->sink_type = SINK_HDMI; 506 ctx->sink_type = SINK_HDMI;
@@ -508,53 +513,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
508 sink_str[ctx->sink_type], sink_name); 513 sink_str[ctx->sink_type], sink_name);
509} 514}
510 515
511static void sii8620_hsic_init(struct sii8620 *ctx)
512{
513 if (!sii8620_is_mhl3(ctx))
514 return;
515
516 sii8620_write(ctx, REG_FCGC,
517 BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
518 sii8620_setbits(ctx, REG_HRXCTRL3,
519 BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
520 sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
521 sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
522 sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
523 sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
524 sii8620_write_seq_static(ctx,
525 REG_TDMLLCTL, 0,
526 REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
527 BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
528 REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
529 REG_HRXINTL, 0xff,
530 REG_HRXINTH, 0xff,
531 REG_TTXINTL, 0xff,
532 REG_TTXINTH, 0xff,
533 REG_TRXINTL, 0xff,
534 REG_TRXINTH, 0xff,
535 REG_HTXINTL, 0xff,
536 REG_HTXINTH, 0xff,
537 REG_FCINTR0, 0xff,
538 REG_FCINTR1, 0xff,
539 REG_FCINTR2, 0xff,
540 REG_FCINTR3, 0xff,
541 REG_FCINTR4, 0xff,
542 REG_FCINTR5, 0xff,
543 REG_FCINTR6, 0xff,
544 REG_FCINTR7, 0xff
545 );
546}
547
548static void sii8620_edid_read(struct sii8620 *ctx, int ret)
549{
550 if (ret < 0)
551 return;
552
553 sii8620_set_upstream_edid(ctx);
554 sii8620_hsic_init(ctx);
555 sii8620_enable_hpd(ctx);
556}
557
558static void sii8620_mr_devcap(struct sii8620 *ctx) 516static void sii8620_mr_devcap(struct sii8620 *ctx)
559{ 517{
560 u8 dcap[MHL_DCAP_SIZE]; 518 u8 dcap[MHL_DCAP_SIZE];
@@ -570,6 +528,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
570 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L], 528 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
571 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]); 529 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
572 sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); 530 sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
531 ctx->devcap_read = true;
532 sii8620_identify_sink(ctx);
573} 533}
574 534
575static void sii8620_mr_xdevcap(struct sii8620 *ctx) 535static void sii8620_mr_xdevcap(struct sii8620 *ctx)
@@ -807,6 +767,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx)
807static void sii8620_fetch_edid(struct sii8620 *ctx) 767static void sii8620_fetch_edid(struct sii8620 *ctx)
808{ 768{
809 u8 lm_ddc, ddc_cmd, int3, cbus; 769 u8 lm_ddc, ddc_cmd, int3, cbus;
770 unsigned long timeout;
810 int fetched, i; 771 int fetched, i;
811 int edid_len = EDID_LENGTH; 772 int edid_len = EDID_LENGTH;
812 u8 *edid; 773 u8 *edid;
@@ -856,23 +817,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
856 REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 817 REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
857 ); 818 );
858 819
859 do { 820 int3 = 0;
860 int3 = sii8620_readb(ctx, REG_INTR3); 821 timeout = jiffies + msecs_to_jiffies(200);
822 for (;;) {
861 cbus = sii8620_readb(ctx, REG_CBUS_STATUS); 823 cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
862 824 if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
863 if (int3 & BIT_DDC_CMD_DONE) 825 kfree(edid);
864 break; 826 edid = NULL;
865 827 goto end;
866 if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) { 828 }
829 if (int3 & BIT_DDC_CMD_DONE) {
830 if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
831 >= FETCH_SIZE)
832 break;
833 } else {
834 int3 = sii8620_readb(ctx, REG_INTR3);
835 }
836 if (time_is_before_jiffies(timeout)) {
837 ctx->error = -ETIMEDOUT;
838 dev_err(ctx->dev, "timeout during EDID read\n");
867 kfree(edid); 839 kfree(edid);
868 edid = NULL; 840 edid = NULL;
869 goto end; 841 goto end;
870 } 842 }
871 } while (1);
872
873 sii8620_readb(ctx, REG_DDC_STATUS);
874 while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
875 usleep_range(10, 20); 843 usleep_range(10, 20);
844 }
876 845
877 sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); 846 sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
878 if (fetched + FETCH_SIZE == EDID_LENGTH) { 847 if (fetched + FETCH_SIZE == EDID_LENGTH) {
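The EDID loop above gains a hard deadline: instead of a do/while that can spin forever if the DDC engine wedges, it computes jiffies + msecs_to_jiffies(200) and fails with -ETIMEDOUT once time_is_before_jiffies() reports the deadline has passed. A hedged kernel-style sketch of that bounded-poll pattern (the ready() callback stands in for the register reads):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* hypothetical helper: poll ready() for up to timeout_ms */
static int poll_until_ready(bool (*ready)(void *), void *ctx,
			    unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if (ready(ctx))
			return 0;
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;   /* deadline passed */
		usleep_range(10, 20);        /* same backoff as the patch */
	}
}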
@@ -971,8 +940,17 @@ static int sii8620_hw_on(struct sii8620 *ctx)
971 ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); 940 ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
972 if (ret) 941 if (ret)
973 return ret; 942 return ret;
943
974 usleep_range(10000, 20000); 944 usleep_range(10000, 20000);
975 return clk_prepare_enable(ctx->clk_xtal); 945 ret = clk_prepare_enable(ctx->clk_xtal);
946 if (ret)
947 return ret;
948
949 msleep(100);
950 gpiod_set_value(ctx->gpio_reset, 0);
951 msleep(100);
952
953 return 0;
976} 954}
977 955
978static int sii8620_hw_off(struct sii8620 *ctx) 956static int sii8620_hw_off(struct sii8620 *ctx)
@@ -982,17 +960,6 @@ static int sii8620_hw_off(struct sii8620 *ctx)
982 return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); 960 return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
983} 961}
984 962
985static void sii8620_hw_reset(struct sii8620 *ctx)
986{
987 usleep_range(10000, 20000);
988 gpiod_set_value(ctx->gpio_reset, 0);
989 usleep_range(5000, 20000);
990 gpiod_set_value(ctx->gpio_reset, 1);
991 usleep_range(10000, 20000);
992 gpiod_set_value(ctx->gpio_reset, 0);
993 msleep(300);
994}
995
996static void sii8620_cbus_reset(struct sii8620 *ctx) 963static void sii8620_cbus_reset(struct sii8620 *ctx)
997{ 964{
998 sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST 965 sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
@@ -1055,23 +1022,23 @@ static void sii8620_set_format(struct sii8620 *ctx)
1055 BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, 1022 BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
1056 ctx->use_packed_pixel ? ~0 : 0); 1023 ctx->use_packed_pixel ? ~0 : 0);
1057 } else { 1024 } else {
1058 if (ctx->use_packed_pixel) 1025 if (ctx->use_packed_pixel) {
1059 sii8620_write_seq_static(ctx, 1026 sii8620_write_seq_static(ctx,
1060 REG_VID_MODE, BIT_VID_MODE_M1080P, 1027 REG_VID_MODE, BIT_VID_MODE_M1080P,
1061 REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1, 1028 REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
1062 REG_MHLTX_CTL6, 0x60 1029 REG_MHLTX_CTL6, 0x60
1063 ); 1030 );
1064 else 1031 } else {
1065 sii8620_write_seq_static(ctx, 1032 sii8620_write_seq_static(ctx,
1066 REG_VID_MODE, 0, 1033 REG_VID_MODE, 0,
1067 REG_MHL_TOP_CTL, 1, 1034 REG_MHL_TOP_CTL, 1,
1068 REG_MHLTX_CTL6, 0xa0 1035 REG_MHLTX_CTL6, 0xa0
1069 ); 1036 );
1037 }
1070 } 1038 }
1071 1039
1072 if (ctx->use_packed_pixel) 1040 if (ctx->use_packed_pixel)
1073 out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) | 1041 out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
1074 BIT_TPI_OUTPUT_CSCMODE709;
1075 else 1042 else
1076 out_fmt = VAL_TPI_FORMAT(RGB, FULL); 1043 out_fmt = VAL_TPI_FORMAT(RGB, FULL);
1077 1044
@@ -1128,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
1128 return frm_len; 1095 return frm_len;
1129} 1096}
1130 1097
1131static void sii8620_set_infoframes(struct sii8620 *ctx) 1098static void sii8620_set_infoframes(struct sii8620 *ctx,
1099 struct drm_display_mode *mode)
1132{ 1100{
1133 struct mhl3_infoframe mhl_frm; 1101 struct mhl3_infoframe mhl_frm;
1134 union hdmi_infoframe frm; 1102 union hdmi_infoframe frm;
1135 u8 buf[31]; 1103 u8 buf[31];
1136 int ret; 1104 int ret;
1137 1105
1106 ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
1107 mode,
1108 true);
1109 if (ctx->use_packed_pixel)
1110 frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
1111
1112 if (!ret)
1113 ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
1114 if (ret > 0)
1115 sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
1116
1138 if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) { 1117 if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
1139 sii8620_write(ctx, REG_TPI_SC, 1118 sii8620_write(ctx, REG_TPI_SC,
1140 BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI); 1119 BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
1141 sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
1142 ARRAY_SIZE(ctx->avif) - 3);
1143 sii8620_write(ctx, REG_PKT_FILTER_0, 1120 sii8620_write(ctx, REG_PKT_FILTER_0,
1144 BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | 1121 BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
1145 BIT_PKT_FILTER_0_DROP_MPEG_PKT | 1122 BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1148,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
1148 return; 1125 return;
1149 } 1126 }
1150 1127
1151 ret = hdmi_avi_infoframe_init(&frm.avi);
1152 frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
1153 frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
1154 frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
1155 frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
1156 frm.avi.video_code = ctx->video_code;
1157 if (!ret)
1158 ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
1159 if (ret > 0)
1160 sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
1161 sii8620_write(ctx, REG_PKT_FILTER_0, 1128 sii8620_write(ctx, REG_PKT_FILTER_0,
1162 BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT | 1129 BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
1163 BIT_PKT_FILTER_0_DROP_MPEG_PKT | 1130 BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1177,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
1177 1144
1178static void sii8620_start_video(struct sii8620 *ctx) 1145static void sii8620_start_video(struct sii8620 *ctx)
1179{ 1146{
1147 struct drm_display_mode *mode =
1148 &ctx->bridge.encoder->crtc->state->adjusted_mode;
1149
1180 if (!sii8620_is_mhl3(ctx)) 1150 if (!sii8620_is_mhl3(ctx))
1181 sii8620_stop_video(ctx); 1151 sii8620_stop_video(ctx);
1182 1152
@@ -1195,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
1195 sii8620_set_format(ctx); 1165 sii8620_set_format(ctx);
1196 1166
1197 if (!sii8620_is_mhl3(ctx)) { 1167 if (!sii8620_is_mhl3(ctx)) {
1198 sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), 1168 u8 link_mode = MHL_DST_LM_PATH_ENABLED;
1199 MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED); 1169
1170 if (ctx->use_packed_pixel)
1171 link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
1172 else
1173 link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
1174
1175 sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
1200 sii8620_set_auto_zone(ctx); 1176 sii8620_set_auto_zone(ctx);
1201 } else { 1177 } else {
1202 static const struct { 1178 static const struct {
@@ -1213,10 +1189,10 @@ static void sii8620_start_video(struct sii8620 *ctx)
1213 MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 }, 1189 MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
1214 }; 1190 };
1215 u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN; 1191 u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
1216 int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3); 1192 int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
1217 int i; 1193 int i;
1218 1194
1219 for (i = 0; i < ARRAY_SIZE(clk_spec); ++i) 1195 for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
1220 if (clk < clk_spec[i].max_clk) 1196 if (clk < clk_spec[i].max_clk)
1221 break; 1197 break;
1222 1198
@@ -1242,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
1242 clk_spec[i].link_rate); 1218 clk_spec[i].link_rate);
1243 } 1219 }
1244 1220
1245 sii8620_set_infoframes(ctx); 1221 sii8620_set_infoframes(ctx, mode);
1246} 1222}
1247 1223
1248static void sii8620_disable_hpd(struct sii8620 *ctx) 1224static void sii8620_disable_hpd(struct sii8620 *ctx)
@@ -1534,6 +1510,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
1534 ); 1510 );
1535} 1511}
1536 1512
1513static void sii8620_hpd_unplugged(struct sii8620 *ctx)
1514{
1515 sii8620_disable_hpd(ctx);
1516 ctx->sink_type = SINK_NONE;
1517 ctx->sink_detected = false;
1518 ctx->feature_complete = false;
1519 kfree(ctx->edid);
1520 ctx->edid = NULL;
1521}
1522
1537static void sii8620_disconnect(struct sii8620 *ctx) 1523static void sii8620_disconnect(struct sii8620 *ctx)
1538{ 1524{
1539 sii8620_disable_gen2_write_burst(ctx); 1525 sii8620_disable_gen2_write_burst(ctx);
@@ -1561,7 +1547,7 @@ static void sii8620_disconnect(struct sii8620 *ctx)
1561 REG_MHL_DP_CTL6, 0x2A, 1547 REG_MHL_DP_CTL6, 0x2A,
1562 REG_MHL_DP_CTL7, 0x03 1548 REG_MHL_DP_CTL7, 0x03
1563 ); 1549 );
1564 sii8620_disable_hpd(ctx); 1550 sii8620_hpd_unplugged(ctx);
1565 sii8620_write_seq_static(ctx, 1551 sii8620_write_seq_static(ctx,
1566 REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, 1552 REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
1567 REG_MHL_COC_CTL1, 0x07, 1553 REG_MHL_COC_CTL1, 0x07,
@@ -1609,10 +1595,8 @@ static void sii8620_disconnect(struct sii8620 *ctx)
1609 memset(ctx->xstat, 0, sizeof(ctx->xstat)); 1595 memset(ctx->xstat, 0, sizeof(ctx->xstat));
1610 memset(ctx->devcap, 0, sizeof(ctx->devcap)); 1596 memset(ctx->devcap, 0, sizeof(ctx->devcap));
1611 memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); 1597 memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
1598 ctx->devcap_read = false;
1612 ctx->cbus_status = 0; 1599 ctx->cbus_status = 0;
1613 ctx->sink_type = SINK_NONE;
1614 kfree(ctx->edid);
1615 ctx->edid = NULL;
1616 sii8620_mt_cleanup(ctx); 1600 sii8620_mt_cleanup(ctx);
1617} 1601}
1618 1602
@@ -1699,17 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)
1699 1683
1700static void sii8620_status_changed_path(struct sii8620 *ctx) 1684static void sii8620_status_changed_path(struct sii8620 *ctx)
1701{ 1685{
1702 if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) { 1686 u8 link_mode;
1703 sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), 1687
1704 MHL_DST_LM_CLK_MODE_NORMAL 1688 if (ctx->use_packed_pixel)
1705 | MHL_DST_LM_PATH_ENABLED); 1689 link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
1706 if (!sii8620_is_mhl3(ctx)) 1690 else
1707 sii8620_mt_read_devcap(ctx, false); 1691 link_mode = MHL_DST_LM_CLK_MODE_NORMAL;
1708 sii8620_mt_set_cont(ctx, sii8620_sink_detected); 1692
1709 } else { 1693 if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
1710 sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), 1694 link_mode |= MHL_DST_LM_PATH_ENABLED;
1711 MHL_DST_LM_CLK_MODE_NORMAL); 1695
1712 } 1696 sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
1697 link_mode);
1713} 1698}
1714 1699
1715static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) 1700static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -1722,9 +1707,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
1722 sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); 1707 sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
1723 sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); 1708 sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
1724 1709
1725 if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) 1710 if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
1711 MHL_DST_CONN_DCAP_RDY) {
1726 sii8620_status_dcap_ready(ctx); 1712 sii8620_status_dcap_ready(ctx);
1727 1713
1714 if (!sii8620_is_mhl3(ctx))
1715 sii8620_mt_read_devcap(ctx, false);
1716 }
1717
1728 if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) 1718 if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
1729 sii8620_status_changed_path(ctx); 1719 sii8620_status_changed_path(ctx);
1730} 1720}
@@ -1808,8 +1798,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
1808 } 1798 }
1809 if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ) 1799 if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
1810 sii8620_send_features(ctx); 1800 sii8620_send_features(ctx);
1811 if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) 1801 if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
1812 sii8620_edid_read(ctx, 0); 1802 ctx->feature_complete = true;
1803 if (ctx->edid)
1804 sii8620_enable_hpd(ctx);
1805 }
1813} 1806}
1814 1807
1815static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) 1808static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1884,6 +1877,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx)
1884 if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) 1877 if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
1885 sii8620_msc_mr_write_stat(ctx); 1878 sii8620_msc_mr_write_stat(ctx);
1886 1879
1880 if (stat & BIT_CBUS_HPD_CHG) {
1881 if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
1882 ctx->sink_detected = true;
1883 sii8620_identify_sink(ctx);
1884 } else {
1885 sii8620_hpd_unplugged(ctx);
1886 }
1887 }
1888
1887 if (stat & BIT_CBUS_MSC_MR_SET_INT) 1889 if (stat & BIT_CBUS_MSC_MR_SET_INT)
1888 sii8620_msc_mr_set_int(ctx); 1890 sii8620_msc_mr_set_int(ctx);
1889 1891
@@ -1931,14 +1933,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx)
1931 ctx->mt_state = MT_STATE_DONE; 1933 ctx->mt_state = MT_STATE_DONE;
1932} 1934}
1933 1935
1934static void sii8620_scdt_high(struct sii8620 *ctx)
1935{
1936 sii8620_write_seq_static(ctx,
1937 REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
1938 REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
1939 );
1940}
1941
1942static void sii8620_irq_scdt(struct sii8620 *ctx) 1936static void sii8620_irq_scdt(struct sii8620 *ctx)
1943{ 1937{
1944 u8 stat = sii8620_readb(ctx, REG_INTR5); 1938 u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1946,53 +1940,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
1946 if (stat & BIT_INTR_SCDT_CHANGE) { 1940 if (stat & BIT_INTR_SCDT_CHANGE) {
1947 u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); 1941 u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
1948 1942
1949 if (cstat & BIT_TMDS_CSTAT_P3_SCDT) { 1943 if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
1950 if (ctx->sink_type == SINK_HDMI) 1944 sii8620_start_video(ctx);
1951 /* enable infoframe interrupt */
1952 sii8620_scdt_high(ctx);
1953 else
1954 sii8620_start_video(ctx);
1955 }
1956 } 1945 }
1957 1946
1958 sii8620_write(ctx, REG_INTR5, stat); 1947 sii8620_write(ctx, REG_INTR5, stat);
1959} 1948}
1960 1949
1961static void sii8620_new_vsi(struct sii8620 *ctx)
1962{
1963 u8 vsif[11];
1964
1965 sii8620_write(ctx, REG_RX_HDMI_CTRL2,
1966 VAL_RX_HDMI_CTRL2_DEFVAL |
1967 BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
1968 sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
1969 ARRAY_SIZE(vsif));
1970}
1971
1972static void sii8620_new_avi(struct sii8620 *ctx)
1973{
1974 sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
1975 sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
1976 ARRAY_SIZE(ctx->avif));
1977}
1978
1979static void sii8620_irq_infr(struct sii8620 *ctx)
1980{
1981 u8 stat = sii8620_readb(ctx, REG_INTR8)
1982 & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
1983
1984 sii8620_write(ctx, REG_INTR8, stat);
1985
1986 if (stat & BIT_CEA_NEW_VSI)
1987 sii8620_new_vsi(ctx);
1988
1989 if (stat & BIT_CEA_NEW_AVI)
1990 sii8620_new_avi(ctx);
1991
1992 if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
1993 sii8620_start_video(ctx);
1994}
1995
1996static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret) 1950static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
1997{ 1951{
1998 if (ret < 0) 1952 if (ret < 0)
@@ -2043,11 +1997,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx)
2043 1997
2044 if (stat & BIT_DDC_CMD_DONE) { 1998 if (stat & BIT_DDC_CMD_DONE) {
2045 sii8620_write(ctx, REG_INTR3_MASK, 0); 1999 sii8620_write(ctx, REG_INTR3_MASK, 0);
2046 if (sii8620_is_mhl3(ctx)) 2000 if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
2047 sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), 2001 sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
2048 MHL_INT_RC_FEAT_REQ); 2002 MHL_INT_RC_FEAT_REQ);
2049 else 2003 else
2050 sii8620_edid_read(ctx, 0); 2004 sii8620_enable_hpd(ctx);
2051 } 2005 }
2052 sii8620_write(ctx, REG_INTR3, stat); 2006 sii8620_write(ctx, REG_INTR3, stat);
2053} 2007}
@@ -2074,7 +2028,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
2074 { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, 2028 { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
2075 { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc }, 2029 { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
2076 { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, 2030 { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
2077 { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
2078 }; 2031 };
2079 struct sii8620 *ctx = data; 2032 struct sii8620 *ctx = data;
2080 u8 stats[LEN_FAST_INTR_STAT]; 2033 u8 stats[LEN_FAST_INTR_STAT];
@@ -2112,7 +2065,6 @@ static void sii8620_cable_in(struct sii8620 *ctx)
2112 dev_err(dev, "Error powering on, %d.\n", ret); 2065 dev_err(dev, "Error powering on, %d.\n", ret);
2113 return; 2066 return;
2114 } 2067 }
2115 sii8620_hw_reset(ctx);
2116 2068
2117 sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); 2069 sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
2118 ret = sii8620_clear_error(ctx); 2070 ret = sii8620_clear_error(ctx);
@@ -2268,17 +2220,43 @@ static void sii8620_detach(struct drm_bridge *bridge)
2268 rc_unregister_device(ctx->rc_dev); 2220 rc_unregister_device(ctx->rc_dev);
2269} 2221}
2270 2222
2223static int sii8620_is_packing_required(struct sii8620 *ctx,
2224 const struct drm_display_mode *mode)
2225{
2226 int max_pclk, max_pclk_pp_mode;
2227
2228 if (sii8620_is_mhl3(ctx)) {
2229 max_pclk = MHL3_MAX_PCLK;
2230 max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
2231 } else {
2232 max_pclk = MHL1_MAX_PCLK;
2233 max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
2234 }
2235
2236 if (mode->clock < max_pclk)
2237 return 0;
2238 else if (mode->clock < max_pclk_pp_mode)
2239 return 1;
2240 else
2241 return -1;
2242}
2243
2271static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge, 2244static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
2272 const struct drm_display_mode *mode) 2245 const struct drm_display_mode *mode)
2273{ 2246{
2274 struct sii8620 *ctx = bridge_to_sii8620(bridge); 2247 struct sii8620 *ctx = bridge_to_sii8620(bridge);
2248 int pack_required = sii8620_is_packing_required(ctx, mode);
2275 bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] & 2249 bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
2276 MHL_DCAP_VID_LINK_PPIXEL; 2250 MHL_DCAP_VID_LINK_PPIXEL;
2277 unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
2278 MHL1_MAX_LCLK;
2279 max_pclk /= can_pack ? 2 : 3;
2280 2251
2281 return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK; 2252 switch (pack_required) {
2253 case 0:
2254 return MODE_OK;
2255 case 1:
2256 return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
2257 default:
2258 return MODE_CLOCK_HIGH;
2259 }
2282} 2260}
2283 2261
2284static bool sii8620_mode_fixup(struct drm_bridge *bridge, 2262static bool sii8620_mode_fixup(struct drm_bridge *bridge,
@@ -2286,43 +2264,14 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
2286 struct drm_display_mode *adjusted_mode) 2264 struct drm_display_mode *adjusted_mode)
2287{ 2265{
2288 struct sii8620 *ctx = bridge_to_sii8620(bridge); 2266 struct sii8620 *ctx = bridge_to_sii8620(bridge);
2289 int max_lclk;
2290 bool ret = true;
2291 2267
2292 mutex_lock(&ctx->lock); 2268 mutex_lock(&ctx->lock);
2293 2269
2294 max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK; 2270 ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
2295 if (max_lclk > 3 * adjusted_mode->clock) { 2271
2296 ctx->use_packed_pixel = 0;
2297 goto end;
2298 }
2299 if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
2300 max_lclk > 2 * adjusted_mode->clock) {
2301 ctx->use_packed_pixel = 1;
2302 goto end;
2303 }
2304 ret = false;
2305end:
2306 if (ret) {
2307 u8 vic = drm_match_cea_mode(adjusted_mode);
2308
2309 if (!vic) {
2310 union hdmi_infoframe frm;
2311 u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
2312
2313 /* FIXME: We need the connector here */
2314 drm_hdmi_vendor_infoframe_from_display_mode(
2315 &frm.vendor.hdmi, NULL, adjusted_mode);
2316 vic = frm.vendor.hdmi.vic;
2317 if (vic >= ARRAY_SIZE(mhl_vic))
2318 vic = 0;
2319 vic = mhl_vic[vic];
2320 }
2321 ctx->video_code = vic;
2322 ctx->pixel_clock = adjusted_mode->clock;
2323 }
2324 mutex_unlock(&ctx->lock); 2272 mutex_unlock(&ctx->lock);
2325 return ret; 2273
2274 return true;
2326} 2275}
2327 2276
2328static const struct drm_bridge_funcs sii8620_bridge_funcs = { 2277static const struct drm_bridge_funcs sii8620_bridge_funcs = {
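The sii8620 rewrite above replaces per-callback clock math with one shared helper: sii8620_is_packing_required() returns 0 when the mode fits the link unpacked, 1 when packed-pixel mode is needed, and -1 when even packing cannot carry it; mode_valid() maps that onto MODE_OK/MODE_CLOCK_HIGH and mode_fixup() records it as use_packed_pixel. A user-space sketch with the patch's MHL3 limits:

#include <stdio.h>

#define MHL3_MAX_PCLK         200000   /* kHz, from the patch */
#define MHL3_MAX_PCLK_PP_MODE 300000

/* 0: no packing needed, 1: packed pixel required, -1: too fast */
static int packing_required(int clock_khz)
{
	if (clock_khz < MHL3_MAX_PCLK)
		return 0;
	if (clock_khz < MHL3_MAX_PCLK_PP_MODE)
		return 1;
	return -1;
}

int main(void)
{
	printf("148500 -> %d\n", packing_required(148500)); /* 1080p60: unpacked */
	printf("297000 -> %d\n", packing_required(297000)); /* 4K30: needs packing */
	printf("594000 -> %d\n", packing_required(594000)); /* 4K60: rejected */
	return 0;
}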
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b553a6f2ff0e..7af748ed1c58 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -369,13 +369,6 @@ EXPORT_SYMBOL(drm_dev_exit);
369 */ 369 */
370void drm_dev_unplug(struct drm_device *dev) 370void drm_dev_unplug(struct drm_device *dev)
371{ 371{
372 drm_dev_unregister(dev);
373
374 mutex_lock(&drm_global_mutex);
375 if (dev->open_count == 0)
376 drm_dev_put(dev);
377 mutex_unlock(&drm_global_mutex);
378
379 /* 372 /*
380 * After synchronizing any critical read section is guaranteed to see 373 * After synchronizing any critical read section is guaranteed to see
381 * the new value of ->unplugged, and any critical section which might 374 * the new value of ->unplugged, and any critical section which might
@@ -384,6 +377,13 @@ void drm_dev_unplug(struct drm_device *dev)
384 */ 377 */
385 dev->unplugged = true; 378 dev->unplugged = true;
386 synchronize_srcu(&drm_unplug_srcu); 379 synchronize_srcu(&drm_unplug_srcu);
380
381 drm_dev_unregister(dev);
382
383 mutex_lock(&drm_global_mutex);
384 if (dev->open_count == 0)
385 drm_dev_put(dev);
386 mutex_unlock(&drm_global_mutex);
387} 387}
388EXPORT_SYMBOL(drm_dev_unplug); 388EXPORT_SYMBOL(drm_dev_unplug);
389 389
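The drm_dev_unplug() reorder above makes the unplugged flag plus synchronize_srcu() a real barrier: every SRCU read section either completes before unregistration starts or observes the device as gone. A sketch of the caller-side pattern that relies on this guarantee (function body hypothetical):

#include <linux/errno.h>
#include <drm/drm_drv.h>

static int demo_ioctl_body(struct drm_device *dev)
{
	int idx;

	/* enters the SRCU read section; fails once unplugged is set */
	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	/* ... safe to touch device state here ... */

	drm_dev_exit(idx);
	return 0;
}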
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 1f8031e30f53..cdb10f885a4f 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)
532 532
533 drm_mode_object_unregister(blob->dev, &blob->base); 533 drm_mode_object_unregister(blob->dev, &blob->base);
534 534
535 kfree(blob); 535 kvfree(blob);
536} 536}
537 537
538/** 538/**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
559 if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) 559 if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
560 return ERR_PTR(-EINVAL); 560 return ERR_PTR(-EINVAL);
561 561
562 blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); 562 blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
563 if (!blob) 563 if (!blob)
564 return ERR_PTR(-ENOMEM); 564 return ERR_PTR(-ENOMEM);
565 565
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
576 ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB, 576 ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
577 true, drm_property_free_blob); 577 true, drm_property_free_blob);
578 if (ret) { 578 if (ret) {
579 kfree(blob); 579 kvfree(blob);
580 return ERR_PTR(-EINVAL); 580 return ERR_PTR(-EINVAL);
581 } 581 }
582 582
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index e5013a999147..540b59fb4103 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -631,8 +631,11 @@ static struct platform_driver etnaviv_platform_driver = {
631 }, 631 },
632}; 632};
633 633
634static struct platform_device *etnaviv_drm;
635
634static int __init etnaviv_init(void) 636static int __init etnaviv_init(void)
635{ 637{
638 struct platform_device *pdev;
636 int ret; 639 int ret;
637 struct device_node *np; 640 struct device_node *np;
638 641
@@ -644,7 +647,7 @@ static int __init etnaviv_init(void)
644 647
645 ret = platform_driver_register(&etnaviv_platform_driver); 648 ret = platform_driver_register(&etnaviv_platform_driver);
646 if (ret != 0) 649 if (ret != 0)
647 platform_driver_unregister(&etnaviv_gpu_driver); 650 goto unregister_gpu_driver;
648 651
649 /* 652 /*
650 * If the DT contains at least one available GPU device, instantiate 653 * If the DT contains at least one available GPU device, instantiate
@@ -653,20 +656,33 @@ static int __init etnaviv_init(void)
653 for_each_compatible_node(np, NULL, "vivante,gc") { 656 for_each_compatible_node(np, NULL, "vivante,gc") {
654 if (!of_device_is_available(np)) 657 if (!of_device_is_available(np))
655 continue; 658 continue;
656 659 pdev = platform_device_register_simple("etnaviv", -1,
657 platform_device_register_simple("etnaviv", -1, NULL, 0); 660 NULL, 0);
661 if (IS_ERR(pdev)) {
662 ret = PTR_ERR(pdev);
663 of_node_put(np);
664 goto unregister_platform_driver;
665 }
666 etnaviv_drm = pdev;
658 of_node_put(np); 667 of_node_put(np);
659 break; 668 break;
660 } 669 }
661 670
671 return 0;
672
673unregister_platform_driver:
674 platform_driver_unregister(&etnaviv_platform_driver);
675unregister_gpu_driver:
676 platform_driver_unregister(&etnaviv_gpu_driver);
662 return ret; 677 return ret;
663} 678}
664module_init(etnaviv_init); 679module_init(etnaviv_init);
665 680
666static void __exit etnaviv_exit(void) 681static void __exit etnaviv_exit(void)
667{ 682{
668 platform_driver_unregister(&etnaviv_gpu_driver); 683 platform_device_unregister(etnaviv_drm);
669 platform_driver_unregister(&etnaviv_platform_driver); 684 platform_driver_unregister(&etnaviv_platform_driver);
685 platform_driver_unregister(&etnaviv_gpu_driver);
670} 686}
671module_exit(etnaviv_exit); 687module_exit(etnaviv_exit);
672 688
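The init path above is a textbook goto-unwind: each failure jumps to a label that undoes exactly the steps that already succeeded, in reverse order, and the exit path tears down in the opposite order of init. A condensed sketch with hypothetical my_* names (unlike the patch, it registers the device unconditionally rather than only when a DT node is present):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver my_gpu_driver;	/* stand-in, normally filled in */
static struct platform_driver my_platform_driver;
static struct platform_device *my_pdev;

static int __init my_init(void)
{
	int ret;

	ret = platform_driver_register(&my_gpu_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&my_platform_driver);
	if (ret)
		goto unregister_gpu_driver;

	my_pdev = platform_device_register_simple("my-drm", -1, NULL, 0);
	if (IS_ERR(my_pdev)) {
		ret = PTR_ERR(my_pdev);
		goto unregister_platform_driver;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&my_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&my_gpu_driver);
	return ret;
}
module_init(my_init);

static void __exit my_exit(void)
{
	/* Tear down in the reverse order of my_init(). */
	platform_device_unregister(my_pdev);
	platform_driver_unregister(&my_platform_driver);
	platform_driver_unregister(&my_gpu_driver);
}
module_exit(my_exit);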
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index dd430f0f8ff5..90f17ff7888e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -131,6 +131,9 @@ struct etnaviv_gpu {
131 struct work_struct sync_point_work; 131 struct work_struct sync_point_work;
132 int sync_point_event; 132 int sync_point_event;
133 133
134 /* hang detection */
135 u32 hangcheck_dma_addr;
136
134 void __iomem *mmio; 137 void __iomem *mmio;
135 int irq; 138 int irq;
136 139
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a74eb57af15b..50d6b88cb7aa 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -10,6 +10,7 @@
10#include "etnaviv_gem.h" 10#include "etnaviv_gem.h"
11#include "etnaviv_gpu.h" 11#include "etnaviv_gpu.h"
12#include "etnaviv_sched.h" 12#include "etnaviv_sched.h"
13#include "state.xml.h"
13 14
14static int etnaviv_job_hang_limit = 0; 15static int etnaviv_job_hang_limit = 0;
15module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444); 16module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
85{ 86{
86 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); 87 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
87 struct etnaviv_gpu *gpu = submit->gpu; 88 struct etnaviv_gpu *gpu = submit->gpu;
89 u32 dma_addr;
90 int change;
91
92 /*
 93	 * If the GPU managed to complete this job's fence, the timeout is
94 * spurious. Bail out.
95 */
96 if (fence_completed(gpu, submit->out_fence->seqno))
97 return;
98
99 /*
100 * If the GPU is still making forward progress on the front-end (which
101	 * should never loop) we push out the timeout to give it a chance to
102 * finish the job.
103 */
104 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
105 change = dma_addr - gpu->hangcheck_dma_addr;
106 if (change < 0 || change > 16) {
107 gpu->hangcheck_dma_addr = dma_addr;
108 schedule_delayed_work(&sched_job->work_tdr,
109 sched_job->sched->timeout);
110 return;
111 }
88 112
89 /* block scheduler */ 113 /* block scheduler */
90 kthread_park(gpu->sched.thread); 114 kthread_park(gpu->sched.thread);
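The timeout handler above distinguishes a hung GPU from a slow one by sampling the front-end DMA address: if the pointer moved since the last timeout, the front-end is still fetching and the timeout is re-armed instead of triggering recovery. A sketch of just that heuristic, with hypothetical my_* names standing in for the driver's register accessors:

struct my_gpu {
	u32 hangcheck_dma_addr;	/* FE address seen at the previous timeout */
};

/* Assumed helper: reads the current front-end DMA address register. */
u32 my_gpu_read_fe_dma_addr(struct my_gpu *gpu);

static bool my_gpu_made_progress(struct my_gpu *gpu)
{
	u32 dma_addr = my_gpu_read_fe_dma_addr(gpu);
	int change = dma_addr - gpu->hangcheck_dma_addr;

	gpu->hangcheck_dma_addr = dma_addr;

	/*
	 * Movement outside a small window means the FE advanced (or jumped
	 * to a new buffer); a tight WAIT/LINK loop only wobbles a few bytes.
	 */
	return change < 0 || change > 16;
}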
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 82c95c34447f..e868773ea509 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
265 unsigned long val; 265 unsigned long val;
266 266
267 val = readl(ctx->addr + DECON_WINCONx(win)); 267 val = readl(ctx->addr + DECON_WINCONx(win));
268 val &= ~WINCONx_BPPMODE_MASK; 268 val &= WINCONx_ENWIN_F;
269 269
270 switch (fb->format->format) { 270 switch (fb->format->format) {
271 case DRM_FORMAT_XRGB1555: 271 case DRM_FORMAT_XRGB1555:
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
356 writel(val, ctx->addr + DECON_VIDOSDxB(win)); 356 writel(val, ctx->addr + DECON_VIDOSDxB(win));
357 } 357 }
358 358
359 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | 359 val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
360 VIDOSD_Wx_ALPHA_B_F(0x0); 360 VIDOSD_Wx_ALPHA_B_F(0xff);
361 writel(val, ctx->addr + DECON_VIDOSDxC(win)); 361 writel(val, ctx->addr + DECON_VIDOSDxC(win));
362 362
363 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | 363 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a81b4a5e24a7..ed3cc2989f93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -420,7 +420,7 @@ err_mode_config_cleanup:
420err_free_private: 420err_free_private:
421 kfree(private); 421 kfree(private);
422err_free_drm: 422err_free_drm:
423 drm_dev_unref(drm); 423 drm_dev_put(drm);
424 424
425 return ret; 425 return ret;
426} 426}
@@ -444,7 +444,7 @@ static void exynos_drm_unbind(struct device *dev)
444 drm->dev_private = NULL; 444 drm->dev_private = NULL;
445 dev_set_drvdata(dev, NULL); 445 dev_set_drvdata(dev, NULL);
446 446
447 drm_dev_unref(drm); 447 drm_dev_put(drm);
448} 448}
449 449
450static const struct component_master_ops exynos_drm_ops = { 450static const struct component_master_ops exynos_drm_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 7fcc1a7ab1a0..27b7d34d776c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -138,7 +138,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
138 138
139err: 139err:
140 while (i--) 140 while (i--)
141 drm_gem_object_unreference_unlocked(&exynos_gem[i]->base); 141 drm_gem_object_put_unlocked(&exynos_gem[i]->base);
142 142
143 return ERR_PTR(ret); 143 return ERR_PTR(ret);
144} 144}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 6127ef25acd6..e8d0670bb5f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
470static void fimc_set_window(struct fimc_context *ctx, 470static void fimc_set_window(struct fimc_context *ctx,
471 struct exynos_drm_ipp_buffer *buf) 471 struct exynos_drm_ipp_buffer *buf)
472{ 472{
473 unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
473 u32 cfg, h1, h2, v1, v2; 474 u32 cfg, h1, h2, v1, v2;
474 475
475 /* cropped image */ 476 /* cropped image */
476 h1 = buf->rect.x; 477 h1 = buf->rect.x;
477 h2 = buf->buf.width - buf->rect.w - buf->rect.x; 478 h2 = real_width - buf->rect.w - buf->rect.x;
478 v1 = buf->rect.y; 479 v1 = buf->rect.y;
479 v2 = buf->buf.height - buf->rect.h - buf->rect.y; 480 v2 = buf->buf.height - buf->rect.h - buf->rect.y;
480 481
481 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", 482 DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
482 buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h, 483 buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
483 buf->buf.width, buf->buf.height); 484 real_width, buf->buf.height);
484 DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); 485 DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
485 486
486 /* 487 /*
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx,
503static void fimc_src_set_size(struct fimc_context *ctx, 504static void fimc_src_set_size(struct fimc_context *ctx,
504 struct exynos_drm_ipp_buffer *buf) 505 struct exynos_drm_ipp_buffer *buf)
505{ 506{
507 unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
506 u32 cfg; 508 u32 cfg;
507 509
508 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); 510 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
509 511
510 /* original size */ 512 /* original size */
511 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) | 513 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
512 EXYNOS_ORGISIZE_VERTICAL(buf->buf.height)); 514 EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
513 515
514 fimc_write(ctx, cfg, EXYNOS_ORGISIZE); 516 fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx,
529 * for now, we support only ITU601 8 bit mode 531 * for now, we support only ITU601 8 bit mode
530 */ 532 */
531 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | 533 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
532 EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) | 534 EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
533 EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height)); 535 EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
534 fimc_write(ctx, cfg, EXYNOS_CISRCFMT); 536 fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
535 537
@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
842static void fimc_dst_set_size(struct fimc_context *ctx, 844static void fimc_dst_set_size(struct fimc_context *ctx,
843 struct exynos_drm_ipp_buffer *buf) 845 struct exynos_drm_ipp_buffer *buf)
844{ 846{
847 unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
845 u32 cfg, cfg_ext; 848 u32 cfg, cfg_ext;
846 849
847 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); 850 DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
848 851
849 /* original size */ 852 /* original size */
850 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) | 853 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
851 EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height)); 854 EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
852 855
853 fimc_write(ctx, cfg, EXYNOS_ORGOSIZE); 856 fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
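The recurring real_width computation above exists because the FIMC registers want the allocated line width of the buffer, which can be wider than the visible width when the pitch includes padding. A sketch of the derivation, with a hypothetical my_buf layout:

struct my_buf {
	unsigned int width;	/* visible width in pixels */
	unsigned int pitch;	/* bytes per scanline, may include padding */
	unsigned int cpp;	/* bytes per pixel of plane 0 */
};

static unsigned int my_buf_real_width(const struct my_buf *buf)
{
	/* Allocated line width in pixels, padding included. */
	return buf->pitch / buf->cpp;
}

For example, a 1920-pixel-wide XRGB8888 plane allocated with an 8192-byte pitch has a real width of 8192 / 4 = 2048 pixels; programming 1920 instead would skew every line after the first.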
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 6e1494fa71b4..bdf5a7655228 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
143 DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); 143 DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
144 144
145 /* drop reference from allocate - handle holds it now. */ 145 /* drop reference from allocate - handle holds it now. */
146 drm_gem_object_unreference_unlocked(obj); 146 drm_gem_object_put_unlocked(obj);
147 147
148 return 0; 148 return 0;
149} 149}
@@ -186,7 +186,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
186 186
187 exynos_gem = to_exynos_gem(obj); 187 exynos_gem = to_exynos_gem(obj);
188 188
189 drm_gem_object_unreference_unlocked(obj); 189 drm_gem_object_put_unlocked(obj);
190 190
191 return exynos_gem->size; 191 return exynos_gem->size;
192} 192}
@@ -329,13 +329,13 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
329 return; 329 return;
330 } 330 }
331 331
332 drm_gem_object_unreference_unlocked(obj); 332 drm_gem_object_put_unlocked(obj);
333 333
334 /* 334 /*
335	 * decrease obj->refcount one more time because we have already 335	 * decrease obj->refcount one more time because we have already
336 * increased it at exynos_drm_gem_get_dma_addr(). 336 * increased it at exynos_drm_gem_get_dma_addr().
337 */ 337 */
338 drm_gem_object_unreference_unlocked(obj); 338 drm_gem_object_put_unlocked(obj);
339} 339}
340 340
341static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, 341static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
@@ -383,7 +383,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
383 args->flags = exynos_gem->flags; 383 args->flags = exynos_gem->flags;
384 args->size = exynos_gem->size; 384 args->size = exynos_gem->size;
385 385
386 drm_gem_object_unreference_unlocked(obj); 386 drm_gem_object_put_unlocked(obj);
387 387
388 return 0; 388 return 0;
389} 389}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 35ac66730563..7ba414b52faa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
492 GSC_IN_CHROMA_ORDER_CRCB); 492 GSC_IN_CHROMA_ORDER_CRCB);
493 break; 493 break;
494 case DRM_FORMAT_NV21: 494 case DRM_FORMAT_NV21:
495 cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
496 break;
495 case DRM_FORMAT_NV61: 497 case DRM_FORMAT_NV61:
496 cfg |= (GSC_IN_CHROMA_ORDER_CRCB | 498 cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
497 GSC_IN_YUV420_2P);
498 break; 499 break;
499 case DRM_FORMAT_YUV422: 500 case DRM_FORMAT_YUV422:
500 cfg |= GSC_IN_YUV422_3P; 501 cfg |= GSC_IN_YUV422_3P;
501 break; 502 break;
502 case DRM_FORMAT_YUV420: 503 case DRM_FORMAT_YUV420:
504 cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
505 break;
503 case DRM_FORMAT_YVU420: 506 case DRM_FORMAT_YVU420:
504 cfg |= GSC_IN_YUV420_3P; 507 cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
505 break; 508 break;
506 case DRM_FORMAT_NV12: 509 case DRM_FORMAT_NV12:
510 cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
511 break;
507 case DRM_FORMAT_NV16: 512 case DRM_FORMAT_NV16:
508 cfg |= (GSC_IN_CHROMA_ORDER_CBCR | 513 cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
509 GSC_IN_YUV420_2P);
510 break; 514 break;
511 } 515 }
512 516
@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
523 527
524 switch (degree) { 528 switch (degree) {
525 case DRM_MODE_ROTATE_0: 529 case DRM_MODE_ROTATE_0:
526 if (rotation & DRM_MODE_REFLECT_Y)
527 cfg |= GSC_IN_ROT_XFLIP;
528 if (rotation & DRM_MODE_REFLECT_X) 530 if (rotation & DRM_MODE_REFLECT_X)
531 cfg |= GSC_IN_ROT_XFLIP;
532 if (rotation & DRM_MODE_REFLECT_Y)
529 cfg |= GSC_IN_ROT_YFLIP; 533 cfg |= GSC_IN_ROT_YFLIP;
530 break; 534 break;
531 case DRM_MODE_ROTATE_90: 535 case DRM_MODE_ROTATE_90:
532 cfg |= GSC_IN_ROT_90; 536 cfg |= GSC_IN_ROT_90;
533 if (rotation & DRM_MODE_REFLECT_Y)
534 cfg |= GSC_IN_ROT_XFLIP;
535 if (rotation & DRM_MODE_REFLECT_X) 537 if (rotation & DRM_MODE_REFLECT_X)
538 cfg |= GSC_IN_ROT_XFLIP;
539 if (rotation & DRM_MODE_REFLECT_Y)
536 cfg |= GSC_IN_ROT_YFLIP; 540 cfg |= GSC_IN_ROT_YFLIP;
537 break; 541 break;
538 case DRM_MODE_ROTATE_180: 542 case DRM_MODE_ROTATE_180:
539 cfg |= GSC_IN_ROT_180; 543 cfg |= GSC_IN_ROT_180;
540 if (rotation & DRM_MODE_REFLECT_Y)
541 cfg &= ~GSC_IN_ROT_XFLIP;
542 if (rotation & DRM_MODE_REFLECT_X) 544 if (rotation & DRM_MODE_REFLECT_X)
545 cfg &= ~GSC_IN_ROT_XFLIP;
546 if (rotation & DRM_MODE_REFLECT_Y)
543 cfg &= ~GSC_IN_ROT_YFLIP; 547 cfg &= ~GSC_IN_ROT_YFLIP;
544 break; 548 break;
545 case DRM_MODE_ROTATE_270: 549 case DRM_MODE_ROTATE_270:
546 cfg |= GSC_IN_ROT_270; 550 cfg |= GSC_IN_ROT_270;
547 if (rotation & DRM_MODE_REFLECT_Y)
548 cfg &= ~GSC_IN_ROT_XFLIP;
549 if (rotation & DRM_MODE_REFLECT_X) 551 if (rotation & DRM_MODE_REFLECT_X)
552 cfg &= ~GSC_IN_ROT_XFLIP;
553 if (rotation & DRM_MODE_REFLECT_Y)
550 cfg &= ~GSC_IN_ROT_YFLIP; 554 cfg &= ~GSC_IN_ROT_YFLIP;
551 break; 555 break;
552 } 556 }
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx,
577 cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | 581 cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
578 GSC_SRCIMG_WIDTH_MASK); 582 GSC_SRCIMG_WIDTH_MASK);
579 583
580 cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) | 584 cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
581 GSC_SRCIMG_HEIGHT(buf->buf.height)); 585 GSC_SRCIMG_HEIGHT(buf->buf.height));
582 586
583 gsc_write(cfg, GSC_SRCIMG_SIZE); 587 gsc_write(cfg, GSC_SRCIMG_SIZE);
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
672 GSC_OUT_CHROMA_ORDER_CRCB); 676 GSC_OUT_CHROMA_ORDER_CRCB);
673 break; 677 break;
674 case DRM_FORMAT_NV21: 678 case DRM_FORMAT_NV21:
675 case DRM_FORMAT_NV61:
676 cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P); 679 cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
677 break; 680 break;
681 case DRM_FORMAT_NV61:
682 cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
683 break;
678 case DRM_FORMAT_YUV422: 684 case DRM_FORMAT_YUV422:
685 cfg |= GSC_OUT_YUV422_3P;
686 break;
679 case DRM_FORMAT_YUV420: 687 case DRM_FORMAT_YUV420:
688 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
689 break;
680 case DRM_FORMAT_YVU420: 690 case DRM_FORMAT_YVU420:
681 cfg |= GSC_OUT_YUV420_3P; 691 cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
682 break; 692 break;
683 case DRM_FORMAT_NV12: 693 case DRM_FORMAT_NV12:
694 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
695 break;
684 case DRM_FORMAT_NV16: 696 case DRM_FORMAT_NV16:
685 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | 697 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
686 GSC_OUT_YUV420_2P);
687 break; 698 break;
688 } 699 }
689 700
@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx,
868 /* original size */ 879 /* original size */
869 cfg = gsc_read(GSC_DSTIMG_SIZE); 880 cfg = gsc_read(GSC_DSTIMG_SIZE);
870 cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK); 881 cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
871 cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) | 882 cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
872 GSC_DSTIMG_HEIGHT(buf->buf.height); 883 GSC_DSTIMG_HEIGHT(buf->buf.height);
873 gsc_write(cfg, GSC_DSTIMG_SIZE); 884 gsc_write(cfg, GSC_DSTIMG_SIZE);
874 885
@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
1341}; 1352};
1342 1353
1343static const struct drm_exynos_ipp_limit gsc_5433_limits[] = { 1354static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
1344 { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) }, 1355 { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
1345 { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) }, 1356 { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
1346 { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) }, 1357 { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
1347 { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, 1358 { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 26374e58c557..b435db8fc916 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -345,27 +345,6 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
345 int ret = 0; 345 int ret = 0;
346 int i; 346 int i;
347 347
348 /* basic checks */
349 if (buf->buf.width == 0 || buf->buf.height == 0)
350 return -EINVAL;
351 buf->format = drm_format_info(buf->buf.fourcc);
352 for (i = 0; i < buf->format->num_planes; i++) {
353 unsigned int width = (i == 0) ? buf->buf.width :
354 DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
355
356 if (buf->buf.pitch[i] == 0)
357 buf->buf.pitch[i] = width * buf->format->cpp[i];
358 if (buf->buf.pitch[i] < width * buf->format->cpp[i])
359 return -EINVAL;
360 if (!buf->buf.gem_id[i])
361 return -ENOENT;
362 }
363
364 /* pitch for additional planes must match */
365 if (buf->format->num_planes > 2 &&
366 buf->buf.pitch[1] != buf->buf.pitch[2])
367 return -EINVAL;
368
369 /* get GEM buffers and check their size */ 348 /* get GEM buffers and check their size */
370 for (i = 0; i < buf->format->num_planes; i++) { 349 for (i = 0; i < buf->format->num_planes; i++) {
371 unsigned int height = (i == 0) ? buf->buf.height : 350 unsigned int height = (i == 0) ? buf->buf.height :
@@ -428,7 +407,7 @@ enum drm_ipp_size_id {
428 IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX 407 IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
429}; 408};
430 409
431static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = { 410static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
432 [IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, 411 [IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
433 [IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, 412 [IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
434 DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, 413 DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
495 enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA; 474 enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
496 struct drm_ipp_limit l; 475 struct drm_ipp_limit l;
497 struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; 476 struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
477 int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
498 478
499 if (!limits) 479 if (!limits)
500 return 0; 480 return 0;
501 481
502 __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l); 482 __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
503 if (!__size_limit_check(buf->buf.width, &l.h) || 483 if (!__size_limit_check(real_width, &l.h) ||
504 !__size_limit_check(buf->buf.height, &l.v)) 484 !__size_limit_check(buf->buf.height, &l.v))
505 return -EINVAL; 485 return -EINVAL;
506 486
@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits(
560 return 0; 540 return 0;
561} 541}
562 542
543static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
544 struct exynos_drm_ipp_buffer *buf,
545 struct exynos_drm_ipp_buffer *src,
546 struct exynos_drm_ipp_buffer *dst,
547 bool rotate, bool swap)
548{
549 const struct exynos_drm_ipp_formats *fmt;
550 int ret, i;
551
552 fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
553 buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
554 DRM_EXYNOS_IPP_FORMAT_DESTINATION);
555 if (!fmt) {
556 DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
557 buf == src ? "src" : "dst");
558 return -EINVAL;
559 }
560
561 /* basic checks */
562 if (buf->buf.width == 0 || buf->buf.height == 0)
563 return -EINVAL;
564
565 buf->format = drm_format_info(buf->buf.fourcc);
566 for (i = 0; i < buf->format->num_planes; i++) {
567 unsigned int width = (i == 0) ? buf->buf.width :
568 DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
569
570 if (buf->buf.pitch[i] == 0)
571 buf->buf.pitch[i] = width * buf->format->cpp[i];
572 if (buf->buf.pitch[i] < width * buf->format->cpp[i])
573 return -EINVAL;
574 if (!buf->buf.gem_id[i])
575 return -ENOENT;
576 }
577
578 /* pitch for additional planes must match */
579 if (buf->format->num_planes > 2 &&
580 buf->buf.pitch[1] != buf->buf.pitch[2])
581 return -EINVAL;
582
583 /* check driver limits */
584 ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
585 fmt->num_limits,
586 rotate,
587 buf == dst ? swap : false);
588 if (ret)
589 return ret;
590 ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
591 fmt->limits,
592 fmt->num_limits, swap);
593 return ret;
594}
595
563static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) 596static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
564{ 597{
565 struct exynos_drm_ipp *ipp = task->ipp; 598 struct exynos_drm_ipp *ipp = task->ipp;
566 const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
567 struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; 599 struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
568 unsigned int rotation = task->transform.rotation; 600 unsigned int rotation = task->transform.rotation;
569 int ret = 0; 601 int ret = 0;
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
607 return -EINVAL; 639 return -EINVAL;
608 } 640 }
609 641
610 src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier, 642 ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
611 DRM_EXYNOS_IPP_FORMAT_SOURCE);
612 if (!src_fmt) {
613 DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
614 return -EINVAL;
615 }
616 ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
617 src_fmt->num_limits,
618 rotate, false);
619 if (ret)
620 return ret;
621 ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
622 src_fmt->limits,
623 src_fmt->num_limits, swap);
624 if (ret) 643 if (ret)
625 return ret; 644 return ret;
626 645
627 dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier, 646 ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
628 DRM_EXYNOS_IPP_FORMAT_DESTINATION);
629 if (!dst_fmt) {
630 DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
631 return -EINVAL;
632 }
633 ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
634 dst_fmt->num_limits,
635 false, swap);
636 if (ret)
637 return ret;
638 ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
639 dst_fmt->limits,
640 dst_fmt->num_limits, swap);
641 if (ret) 647 if (ret)
642 return ret; 648 return ret;
643 649
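The checks that moved into exynos_drm_ipp_check_format() above boil down to per-plane pitch validation. A sketch of that core under a hypothetical my_buf type; struct drm_format_info and DIV_ROUND_UP are the real kernel API:

#include <linux/errno.h>
#include <linux/kernel.h>	/* DIV_ROUND_UP */
#include <drm/drm_fourcc.h>	/* struct drm_format_info */

struct my_buf {
	unsigned int width;
	unsigned int pitch[3];
};

static int my_check_pitches(struct my_buf *buf,
			    const struct drm_format_info *format)
{
	int i;

	for (i = 0; i < format->num_planes; i++) {
		/* Chroma planes of subsampled formats are narrower. */
		unsigned int width = (i == 0) ? buf->width :
				     DIV_ROUND_UP(buf->width, format->hsub);

		if (buf->pitch[i] == 0)	/* fill in the tightly packed default */
			buf->pitch[i] = width * format->cpp[i];
		if (buf->pitch[i] < width * format->cpp[i])
			return -EINVAL;
	}

	/* Chroma planes of 3-plane formats must share one pitch. */
	if (format->num_planes > 2 && buf->pitch[1] != buf->pitch[2])
		return -EINVAL;

	return 0;
}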
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 38a2a7f1204b..7098c6d35266 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
132 if (plane->state) { 132 if (plane->state) {
133 exynos_state = to_exynos_plane_state(plane->state); 133 exynos_state = to_exynos_plane_state(plane->state);
134 if (exynos_state->base.fb) 134 if (exynos_state->base.fb)
135 drm_framebuffer_unreference(exynos_state->base.fb); 135 drm_framebuffer_put(exynos_state->base.fb);
136 kfree(exynos_state); 136 kfree(exynos_state);
137 plane->state = NULL; 137 plane->state = NULL;
138 } 138 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 1a76dd3d52e1..a820a68429b9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot,
168 val &= ~ROT_CONTROL_FLIP_MASK; 168 val &= ~ROT_CONTROL_FLIP_MASK;
169 169
170 if (rotation & DRM_MODE_REFLECT_X) 170 if (rotation & DRM_MODE_REFLECT_X)
171 val |= ROT_CONTROL_FLIP_HORIZONTAL;
172 if (rotation & DRM_MODE_REFLECT_Y)
173 val |= ROT_CONTROL_FLIP_VERTICAL; 171 val |= ROT_CONTROL_FLIP_VERTICAL;
172 if (rotation & DRM_MODE_REFLECT_Y)
173 val |= ROT_CONTROL_FLIP_HORIZONTAL;
174 174
175 val &= ~ROT_CONTROL_ROT_MASK; 175 val &= ~ROT_CONTROL_ROT_MASK;
176 176
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 91d4382343d0..0ddb6eec7b11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -30,6 +30,7 @@
30#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset)) 30#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset))
31#define SCALER_MAX_CLK 4 31#define SCALER_MAX_CLK 4
32#define SCALER_AUTOSUSPEND_DELAY 2000 32#define SCALER_AUTOSUSPEND_DELAY 2000
33#define SCALER_RESET_WAIT_RETRIES 100
33 34
34struct scaler_data { 35struct scaler_data {
35 const char *clk_name[SCALER_MAX_CLK]; 36 const char *clk_name[SCALER_MAX_CLK];
@@ -51,9 +52,9 @@ struct scaler_context {
51static u32 scaler_get_format(u32 drm_fmt) 52static u32 scaler_get_format(u32 drm_fmt)
52{ 53{
53 switch (drm_fmt) { 54 switch (drm_fmt) {
54 case DRM_FORMAT_NV21:
55 return SCALER_YUV420_2P_UV;
56 case DRM_FORMAT_NV12: 55 case DRM_FORMAT_NV12:
56 return SCALER_YUV420_2P_UV;
57 case DRM_FORMAT_NV21:
57 return SCALER_YUV420_2P_VU; 58 return SCALER_YUV420_2P_VU;
58 case DRM_FORMAT_YUV420: 59 case DRM_FORMAT_YUV420:
59 return SCALER_YUV420_3P; 60 return SCALER_YUV420_3P;
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt)
63 return SCALER_YUV422_1P_UYVY; 64 return SCALER_YUV422_1P_UYVY;
64 case DRM_FORMAT_YVYU: 65 case DRM_FORMAT_YVYU:
65 return SCALER_YUV422_1P_YVYU; 66 return SCALER_YUV422_1P_YVYU;
66 case DRM_FORMAT_NV61:
67 return SCALER_YUV422_2P_UV;
68 case DRM_FORMAT_NV16: 67 case DRM_FORMAT_NV16:
68 return SCALER_YUV422_2P_UV;
69 case DRM_FORMAT_NV61:
69 return SCALER_YUV422_2P_VU; 70 return SCALER_YUV422_2P_VU;
70 case DRM_FORMAT_YUV422: 71 case DRM_FORMAT_YUV422:
71 return SCALER_YUV422_3P; 72 return SCALER_YUV422_3P;
72 case DRM_FORMAT_NV42:
73 return SCALER_YUV444_2P_UV;
74 case DRM_FORMAT_NV24: 73 case DRM_FORMAT_NV24:
74 return SCALER_YUV444_2P_UV;
75 case DRM_FORMAT_NV42:
75 return SCALER_YUV444_2P_VU; 76 return SCALER_YUV444_2P_VU;
76 case DRM_FORMAT_YUV444: 77 case DRM_FORMAT_YUV444:
77 return SCALER_YUV444_3P; 78 return SCALER_YUV444_3P;
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt)
100 return 0; 101 return 0;
101} 102}
102 103
104static inline int scaler_reset(struct scaler_context *scaler)
105{
106 int retry = SCALER_RESET_WAIT_RETRIES;
107
108 scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
109 do {
110 cpu_relax();
111	} while (--retry > 1 &&
112 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
113 do {
114 cpu_relax();
115 scaler_write(1, SCALER_INT_EN);
116	} while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
117
118 return retry ? 0 : -EIO;
119}
120
103static inline void scaler_enable_int(struct scaler_context *scaler) 121static inline void scaler_enable_int(struct scaler_context *scaler)
104{ 122{
105 u32 val; 123 u32 val;
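The reset helper above busy-waits with a bounded retry count. An alternative sketch of the same wait using the generic iopoll helper; the SCALER_CFG names are reused from the patch, and scaler->regs is assumed to be the MMIO base as in the scaler_write() macro:

#include <linux/io.h>
#include <linux/iopoll.h>

static int my_scaler_reset(struct scaler_context *scaler)
{
	u32 val;

	writel(SCALER_CFG_SOFT_RESET, scaler->regs + SCALER_CFG);

	/* Poll every 10us until the self-clearing reset bit drops;
	 * give up with -ETIMEDOUT after 10ms. */
	return readl_poll_timeout(scaler->regs + SCALER_CFG, val,
				  !(val & SCALER_CFG_SOFT_RESET), 10, 10000);
}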
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
354 u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc); 372 u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
355 struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect; 373 struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
356 374
357 scaler->task = task;
358
359 pm_runtime_get_sync(scaler->dev); 375 pm_runtime_get_sync(scaler->dev);
376 if (scaler_reset(scaler)) {
377 pm_runtime_put(scaler->dev);
378 return -EIO;
379 }
380
381 scaler->task = task;
360 382
361 scaler_set_src_fmt(scaler, src_fmt); 383 scaler_set_src_fmt(scaler, src_fmt);
362 scaler_set_src_base(scaler, &task->src); 384 scaler_set_src_base(scaler, &task->src);
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler)
394 416
395static inline u32 scaler_get_int_status(struct scaler_context *scaler) 417static inline u32 scaler_get_int_status(struct scaler_context *scaler)
396{ 418{
397 return scaler_read(SCALER_INT_STATUS); 419 u32 val = scaler_read(SCALER_INT_STATUS);
420
421 scaler_write(val, SCALER_INT_STATUS);
422
423 return val;
398} 424}
399 425
400static inline int scaler_task_done(u32 val) 426static inline int scaler_task_done(u32 val)
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 4704a993cbb7..16b39734115c 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -138,6 +138,7 @@
138#define GSC_OUT_YUV420_3P (3 << 4) 138#define GSC_OUT_YUV420_3P (3 << 4)
139#define GSC_OUT_YUV422_1P (4 << 4) 139#define GSC_OUT_YUV422_1P (4 << 4)
140#define GSC_OUT_YUV422_2P (5 << 4) 140#define GSC_OUT_YUV422_2P (5 << 4)
141#define GSC_OUT_YUV422_3P (6 << 4)
141#define GSC_OUT_YUV444 (7 << 4) 142#define GSC_OUT_YUV444 (7 << 4)
142#define GSC_OUT_TILE_TYPE_MASK (1 << 2) 143#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
143#define GSC_OUT_TILE_C_16x8 (0 << 2) 144#define GSC_OUT_TILE_C_16x8 (0 << 2)
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8180e8d1e2..4b072ade8c38 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
196 ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | 196 ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
197 TRANS_DDI_PORT_MASK); 197 TRANS_DDI_PORT_MASK);
198 vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= 198 vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
199 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | 199 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
200 (PORT_B << TRANS_DDI_PORT_SHIFT) | 200 (PORT_B << TRANS_DDI_PORT_SHIFT) |
201 TRANS_DDI_FUNC_ENABLE); 201 TRANS_DDI_FUNC_ENABLE);
202 if (IS_BROADWELL(dev_priv)) { 202 if (IS_BROADWELL(dev_priv)) {
@@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
216 ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | 216 ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
217 TRANS_DDI_PORT_MASK); 217 TRANS_DDI_PORT_MASK);
218 vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= 218 vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
219 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | 219 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
220 (PORT_C << TRANS_DDI_PORT_SHIFT) | 220 (PORT_C << TRANS_DDI_PORT_SHIFT) |
221 TRANS_DDI_FUNC_ENABLE); 221 TRANS_DDI_FUNC_ENABLE);
222 if (IS_BROADWELL(dev_priv)) { 222 if (IS_BROADWELL(dev_priv)) {
@@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
236 ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | 236 ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
237 TRANS_DDI_PORT_MASK); 237 TRANS_DDI_PORT_MASK);
238 vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= 238 vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
239 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | 239 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
240 (PORT_D << TRANS_DDI_PORT_SHIFT) | 240 (PORT_D << TRANS_DDI_PORT_SHIFT) |
241 TRANS_DDI_FUNC_ENABLE); 241 TRANS_DDI_FUNC_ENABLE);
242 if (IS_BROADWELL(dev_priv)) { 242 if (IS_BROADWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 23296547da95..4efec8fa6c1d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1592 vgpu_free_mm(mm); 1592 vgpu_free_mm(mm);
1593 return ERR_PTR(-ENOMEM); 1593 return ERR_PTR(-ENOMEM);
1594 } 1594 }
1595 mm->ggtt_mm.last_partial_off = -1UL;
1595 1596
1596 return mm; 1597 return mm;
1597} 1598}
@@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
1616 invalidate_ppgtt_mm(mm); 1617 invalidate_ppgtt_mm(mm);
1617 } else { 1618 } else {
1618 vfree(mm->ggtt_mm.virtual_ggtt); 1619 vfree(mm->ggtt_mm.virtual_ggtt);
1620 mm->ggtt_mm.last_partial_off = -1UL;
1619 } 1621 }
1620 1622
1621 vgpu_free_mm(mm); 1623 vgpu_free_mm(mm);
@@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1868 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 1870 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
1869 bytes); 1871 bytes);
1870 1872
 1873	/* If the ggtt entry size is 8 bytes and it is split into two 4-byte
 1874	 * writes, we assume the two writes are consecutive.
 1875	 * Otherwise, we abort and report an error.
1876 */
1877 if (bytes < info->gtt_entry_size) {
1878 if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
 1879			/* the first partial part */
1880 ggtt_mm->ggtt_mm.last_partial_off = off;
1881 ggtt_mm->ggtt_mm.last_partial_data = e.val64;
1882 return 0;
1883 } else if ((g_gtt_index ==
1884 (ggtt_mm->ggtt_mm.last_partial_off >>
1885 info->gtt_entry_size_shift)) &&
1886 (off != ggtt_mm->ggtt_mm.last_partial_off)) {
1887 /* the second partial part */
1888
1889 int last_off = ggtt_mm->ggtt_mm.last_partial_off &
1890 (info->gtt_entry_size - 1);
1891
1892 memcpy((void *)&e.val64 + last_off,
1893 (void *)&ggtt_mm->ggtt_mm.last_partial_data +
1894 last_off, bytes);
1895
1896 ggtt_mm->ggtt_mm.last_partial_off = -1UL;
1897 } else {
1898 int last_offset;
1899
1900 gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
1901 ggtt_mm->ggtt_mm.last_partial_off, off,
1902 bytes, info->gtt_entry_size);
1903
1904 /* set host ggtt entry to scratch page and clear
1905 * virtual ggtt entry as not present for last
 1906			 * partially written offset
1907 */
1908 last_offset = ggtt_mm->ggtt_mm.last_partial_off &
1909 (~(info->gtt_entry_size - 1));
1910
1911 ggtt_get_host_entry(ggtt_mm, &m, last_offset);
1912 ggtt_invalidate_pte(vgpu, &m);
1913 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
1914 ops->clear_present(&m);
1915 ggtt_set_host_entry(ggtt_mm, &m, last_offset);
1916 ggtt_invalidate(gvt->dev_priv);
1917
1918 ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
1919 ops->clear_present(&e);
1920 ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
1921
1922 ggtt_mm->ggtt_mm.last_partial_off = off;
1923 ggtt_mm->ggtt_mm.last_partial_data = e.val64;
1924
1925 return 0;
1926 }
1927 }
1928
1871 if (ops->test_present(&e)) { 1929 if (ops->test_present(&e)) {
1872 gfn = ops->get_pfn(&e); 1930 gfn = ops->get_pfn(&e);
1873 m = e; 1931 m = e;
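A condensed sketch of the partial-write tracking added above, with hypothetical my_* names: the first 4-byte half of an 8-byte GGTT entry is stashed, and the entry is only committed once the second, consecutive half arrives. The abort path for a non-consecutive second write is omitted:

#include <linux/types.h>

struct my_ggtt {
	unsigned long last_partial_off;	/* -1UL: no half-written entry pending */
	u64 last_partial_data;
};

/* Returns true when a full 8-byte entry has been assembled in *entry. */
static bool my_ggtt_write4(struct my_ggtt *ggtt, unsigned long off,
			   u32 data, u64 *entry)
{
	unsigned int shift = (off & 4) * 8;	/* low or high dword */

	if (ggtt->last_partial_off == -1UL) {
		/* First half: stash it and wait for its sibling. */
		ggtt->last_partial_off = off;
		ggtt->last_partial_data = (u64)data << shift;
		return false;
	}

	/* Second half of the same entry: merge both halves and commit. */
	*entry = ggtt->last_partial_data | ((u64)data << shift);
	ggtt->last_partial_off = -1UL;
	return true;
}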
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 3792f2b7f4ff..97e62647418a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -150,6 +150,8 @@ struct intel_vgpu_mm {
150 } ppgtt_mm; 150 } ppgtt_mm;
151 struct { 151 struct {
152 void *virtual_ggtt; 152 void *virtual_ggtt;
153 unsigned long last_partial_off;
154 u64 last_partial_data;
153 } ggtt_mm; 155 } ggtt_mm;
154 }; 156 };
155}; 157};
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 34c125e2d90c..52f3b91d14fd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -340,14 +340,21 @@ struct drm_i915_file_private {
340 340
341 unsigned int bsd_engine; 341 unsigned int bsd_engine;
342 342
343/* Client can have a maximum of 3 contexts banned before 343/*
344 * it is denied of creating new contexts. As one context 344 * Every context ban increments the per-client ban score. Also
345 * ban needs 4 consecutive hangs, and more if there is 345 * hangs in short succession increment the ban score. If the ban
346 * progress in between, this is a last resort stop gap measure 346 * threshold is reached, the client is considered banned and submitting
347 * to limit the badly behaving clients access to gpu. 347 * more work will fail. This is a stop-gap measure to limit badly
348 * behaving clients' access to the gpu. Note that unbannable contexts
349 * never increment the client ban score.
348 */ 350 */
349#define I915_MAX_CLIENT_CONTEXT_BANS 3 351#define I915_CLIENT_SCORE_HANG_FAST 1
350 atomic_t context_bans; 352#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
353#define I915_CLIENT_SCORE_CONTEXT_BAN 3
354#define I915_CLIENT_SCORE_BANNED 9
355 /** ban_score: Accumulated score of all ctx bans and fast hangs. */
356 atomic_t ban_score;
357 unsigned long hang_timestamp;
351}; 358};
352 359
353/* Interface history: 360/* Interface history:
@@ -2238,9 +2245,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
2238 **/ 2245 **/
2239static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2246static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2240{ 2247{
2241#ifdef CONFIG_DEBUG_SG
2242 BUG_ON(sg->sg_magic != SG_MAGIC);
2243#endif
2244 return sg_is_last(sg) ? NULL : ____sg_next(sg); 2248 return sg_is_last(sg) ? NULL : ____sg_next(sg);
2245} 2249}
2246 2250
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3704f4c0c2c9..17c5097721e8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf)
2002 bool write = !!(vmf->flags & FAULT_FLAG_WRITE); 2002 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
2003 struct i915_vma *vma; 2003 struct i915_vma *vma;
2004 pgoff_t page_offset; 2004 pgoff_t page_offset;
2005 unsigned int flags;
2006 int ret; 2005 int ret;
2007 2006
2008 /* We don't use vmf->pgoff since that has the fake offset */ 2007 /* We don't use vmf->pgoff since that has the fake offset */
@@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf)
2038 goto err_unlock; 2037 goto err_unlock;
2039 } 2038 }
2040 2039
2041 /* If the object is smaller than a couple of partial vma, it is
2042 * not worth only creating a single partial vma - we may as well
2043 * clear enough space for the full object.
2044 */
2045 flags = PIN_MAPPABLE;
2046 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
2047 flags |= PIN_NONBLOCK | PIN_NONFAULT;
2048 2040
2049 /* Now pin it into the GTT as needed */ 2041 /* Now pin it into the GTT as needed */
2050 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); 2042 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
2043 PIN_MAPPABLE |
2044 PIN_NONBLOCK |
2045 PIN_NONFAULT);
2051 if (IS_ERR(vma)) { 2046 if (IS_ERR(vma)) {
2052 /* Use a partial view if it is bigger than available space */ 2047 /* Use a partial view if it is bigger than available space */
2053 struct i915_ggtt_view view = 2048 struct i915_ggtt_view view =
2054 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); 2049 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
2050 unsigned int flags;
2055 2051
2056 /* Userspace is now writing through an untracked VMA, abandon 2052 flags = PIN_MAPPABLE;
2053 if (view.type == I915_GGTT_VIEW_NORMAL)
2054 flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
2055
2056 /*
2057 * Userspace is now writing through an untracked VMA, abandon
2057 * all hope that the hardware is able to track future writes. 2058 * all hope that the hardware is able to track future writes.
2058 */ 2059 */
2059 obj->frontbuffer_ggtt_origin = ORIGIN_CPU; 2060 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
2060 2061
2061 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); 2062 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
2063 if (IS_ERR(vma) && !view.type) {
2064 flags = PIN_MAPPABLE;
2065 view.type = I915_GGTT_VIEW_PARTIAL;
2066 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
2067 }
2062 } 2068 }
2063 if (IS_ERR(vma)) { 2069 if (IS_ERR(vma)) {
2064 ret = PTR_ERR(vma); 2070 ret = PTR_ERR(vma);
@@ -2933,32 +2939,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2933 return 0; 2939 return 0;
2934} 2940}
2935 2941
2942static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
2943 const struct i915_gem_context *ctx)
2944{
2945 unsigned int score;
2946 unsigned long prev_hang;
2947
2948 if (i915_gem_context_is_banned(ctx))
2949 score = I915_CLIENT_SCORE_CONTEXT_BAN;
2950 else
2951 score = 0;
2952
2953 prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
2954 if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
2955 score += I915_CLIENT_SCORE_HANG_FAST;
2956
2957 if (score) {
2958 atomic_add(score, &file_priv->ban_score);
2959
2960 DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
2961 ctx->name, score,
2962 atomic_read(&file_priv->ban_score));
2963 }
2964}
2965
2936static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) 2966static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2937{ 2967{
2938 bool banned; 2968 unsigned int score;
2969 bool banned, bannable;
2939 2970
2940 atomic_inc(&ctx->guilty_count); 2971 atomic_inc(&ctx->guilty_count);
2941 2972
2942 banned = false; 2973 bannable = i915_gem_context_is_bannable(ctx);
2943 if (i915_gem_context_is_bannable(ctx)) { 2974 score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
2944 unsigned int score; 2975 banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
2945 2976
2946 score = atomic_add_return(CONTEXT_SCORE_GUILTY, 2977 DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
2947 &ctx->ban_score); 2978 ctx->name, atomic_read(&ctx->guilty_count),
2948 banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; 2979 score, yesno(banned && bannable));
2949 2980
2950 DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n", 2981 /* Cool contexts don't accumulate client ban score */
2951 ctx->name, score, yesno(banned)); 2982 if (!bannable)
2952 }
2953 if (!banned)
2954 return; 2983 return;
2955 2984
2956 i915_gem_context_set_banned(ctx); 2985 if (banned)
2957 if (!IS_ERR_OR_NULL(ctx->file_priv)) { 2986 i915_gem_context_set_banned(ctx);
2958 atomic_inc(&ctx->file_priv->context_bans); 2987
2959 DRM_DEBUG_DRIVER("client %s has had %d context banned\n", 2988 if (!IS_ERR_OR_NULL(ctx->file_priv))
2960 ctx->name, atomic_read(&ctx->file_priv->context_bans)); 2989 i915_gem_client_mark_guilty(ctx->file_priv, ctx);
2961 }
2962} 2990}
2963 2991
2964static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) 2992static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -5736,6 +5764,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
5736 INIT_LIST_HEAD(&file_priv->mm.request_list); 5764 INIT_LIST_HEAD(&file_priv->mm.request_list);
5737 5765
5738 file_priv->bsd_engine = -1; 5766 file_priv->bsd_engine = -1;
5767 file_priv->hang_timestamp = jiffies;
5739 5768
5740 ret = i915_gem_context_open(i915, file); 5769 ret = i915_gem_context_open(i915, file);
5741 if (ret) 5770 if (ret)
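The fast-hang detection above hinges on xchg(): it atomically swaps in the current timestamp while returning the previous one, so two hangs inside the window add to the client's score even when the individual contexts were not bannable. A sketch with hypothetical my_* names; xchg(), time_before() and the atomic_t helpers are the real kernel API:

#include <linux/atomic.h>
#include <linux/jiffies.h>

#define MY_SCORE_HANG_FAST	1
#define MY_FAST_HANG_JIFFIES	(60 * HZ)
#define MY_SCORE_BANNED		9

struct my_client {
	atomic_t ban_score;
	unsigned long hang_timestamp;
};

static void my_client_mark_guilty(struct my_client *client)
{
	/* Swap in "now", get back the time of the previous hang. */
	unsigned long prev = xchg(&client->hang_timestamp, jiffies);

	if (time_before(jiffies, prev + MY_FAST_HANG_JIFFIES))
		atomic_add(MY_SCORE_HANG_FAST, &client->ban_score);
}

static bool my_client_is_banned(struct my_client *client)
{
	return atomic_read(&client->ban_score) >= MY_SCORE_BANNED;
}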
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 33f8a4b3c981..060335d3d9e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -652,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
652 652
653static bool client_is_banned(struct drm_i915_file_private *file_priv) 653static bool client_is_banned(struct drm_i915_file_private *file_priv)
654{ 654{
655 return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS; 655 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
656} 656}
657 657
658int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 658int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f627a8c47c58..22df17c8ca9b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -489,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
489} 489}
490 490
491static int 491static int
492eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) 492eb_add_vma(struct i915_execbuffer *eb,
493 unsigned int i, unsigned batch_idx,
494 struct i915_vma *vma)
493{ 495{
494 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 496 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
495 int err; 497 int err;
@@ -522,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
522 eb->flags[i] = entry->flags; 524 eb->flags[i] = entry->flags;
523 vma->exec_flags = &eb->flags[i]; 525 vma->exec_flags = &eb->flags[i];
524 526
527 /*
528 * SNA is doing fancy tricks with compressing batch buffers, which leads
529 * to negative relocation deltas. Usually that works out ok since the
530 * relocate address is still positive, except when the batch is placed
531 * very low in the GTT. Ensure this doesn't happen.
532 *
533 * Note that actual hangs have only been observed on gen7, but for
534 * paranoia do it everywhere.
535 */
536 if (i == batch_idx) {
537 if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
538 eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
539 if (eb->reloc_cache.has_fence)
540 eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
541
542 eb->batch = vma;
543 }
544
525 err = 0; 545 err = 0;
526 if (eb_pin_vma(eb, entry, vma)) { 546 if (eb_pin_vma(eb, entry, vma)) {
527 if (entry->offset != vma->node.start) { 547 if (entry->offset != vma->node.start) {
@@ -716,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
716{ 736{
717 struct radix_tree_root *handles_vma = &eb->ctx->handles_vma; 737 struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
718 struct drm_i915_gem_object *obj; 738 struct drm_i915_gem_object *obj;
719 unsigned int i; 739 unsigned int i, batch;
720 int err; 740 int err;
721 741
722 if (unlikely(i915_gem_context_is_closed(eb->ctx))) 742 if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -728,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
728 INIT_LIST_HEAD(&eb->relocs); 748 INIT_LIST_HEAD(&eb->relocs);
729 INIT_LIST_HEAD(&eb->unbound); 749 INIT_LIST_HEAD(&eb->unbound);
730 750
751 batch = eb_batch_index(eb);
752
731 for (i = 0; i < eb->buffer_count; i++) { 753 for (i = 0; i < eb->buffer_count; i++) {
732 u32 handle = eb->exec[i].handle; 754 u32 handle = eb->exec[i].handle;
733 struct i915_lut_handle *lut; 755 struct i915_lut_handle *lut;
@@ -770,33 +792,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
770 lut->handle = handle; 792 lut->handle = handle;
771 793
772add_vma: 794add_vma:
773 err = eb_add_vma(eb, i, vma); 795 err = eb_add_vma(eb, i, batch, vma);
774 if (unlikely(err)) 796 if (unlikely(err))
775 goto err_vma; 797 goto err_vma;
776 798
777 GEM_BUG_ON(vma != eb->vma[i]); 799 GEM_BUG_ON(vma != eb->vma[i]);
778 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); 800 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
801 GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
802 eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
779 } 803 }
780 804
781 /* take note of the batch buffer before we might reorder the lists */
782 i = eb_batch_index(eb);
783 eb->batch = eb->vma[i];
784 GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
785
786 /*
787 * SNA is doing fancy tricks with compressing batch buffers, which leads
788 * to negative relocation deltas. Usually that works out ok since the
789 * relocate address is still positive, except when the batch is placed
790 * very low in the GTT. Ensure this doesn't happen.
791 *
792 * Note that actual hangs have only been observed on gen7, but for
793 * paranoia do it everywhere.
794 */
795 if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
796 eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
797 if (eb->reloc_cache.has_fence)
798 eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
799
800 eb->args->flags |= __EXEC_VALIDATED; 805 eb->args->flags |= __EXEC_VALIDATED;
801 return eb_reserve(eb); 806 return eb_reserve(eb);
802 807
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f9bc3aaa90d0..4a02747ac658 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1893,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1893 1893
1894 /* 1894 /*
1895 * Clear the PIPE*STAT regs before the IIR 1895 * Clear the PIPE*STAT regs before the IIR
1896 *
1897 * Toggle the enable bits to make sure we get an
1898 * edge in the ISR pipe event bit if we don't clear
1899 * all the enabled status bits. Otherwise the edge
1900 * triggered IIR on i965/g4x wouldn't notice that
1901 * an interrupt is still pending.
1896 */ 1902 */
1897 if (pipe_stats[pipe]) 1903 if (pipe_stats[pipe]) {
1898 I915_WRITE(reg, enable_mask | pipe_stats[pipe]); 1904 I915_WRITE(reg, pipe_stats[pipe]);
1905 I915_WRITE(reg, enable_mask);
1906 }
1899 } 1907 }
1900 spin_unlock(&dev_priv->irq_lock); 1908 spin_unlock(&dev_priv->irq_lock);
1901} 1909}
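A sketch of the two-write ack introduced above (the my_* name is hypothetical). A PIPESTAT register holds enable bits and write-1-to-clear status bits together; dropping the enables and then restoring them forces a fresh edge on the edge-triggered IIR summary bit if any event is still asserted:

#include <linux/io.h>

static void my_ack_pipestat(void __iomem *reg, u32 enable_mask, u32 status)
{
	writel(status, reg);		/* clear latched status, enables low */
	writel(enable_mask, reg);	/* restore enables: new edge if pending */
}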
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f11bb213ec07..7720569f2024 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2425,12 +2425,17 @@ enum i915_power_well_id {
2425#define _3D_CHICKEN _MMIO(0x2084) 2425#define _3D_CHICKEN _MMIO(0x2084)
2426#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) 2426#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
2427#define _3D_CHICKEN2 _MMIO(0x208c) 2427#define _3D_CHICKEN2 _MMIO(0x208c)
2428
2429#define FF_SLICE_CHICKEN _MMIO(0x2088)
2430#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
2431
2428/* Disables pipelining of read flushes past the SF-WIZ interface. 2432/* Disables pipelining of read flushes past the SF-WIZ interface.
2429 * Required on all Ironlake steppings according to the B-Spec, but the 2433 * Required on all Ironlake steppings according to the B-Spec, but the
2430 * particular danger of not doing so is not specified. 2434 * particular danger of not doing so is not specified.
2431 */ 2435 */
2432# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) 2436# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
2433#define _3D_CHICKEN3 _MMIO(0x2090) 2437#define _3D_CHICKEN3 _MMIO(0x2090)
2438#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
2434#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) 2439#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
2435#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) 2440#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
2436#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) 2441#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9324d476e0a7..0531c01c3604 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj,
109 obj->base.size >> PAGE_SHIFT)); 109 obj->base.size >> PAGE_SHIFT));
110 vma->size = view->partial.size; 110 vma->size = view->partial.size;
111 vma->size <<= PAGE_SHIFT; 111 vma->size <<= PAGE_SHIFT;
112 GEM_BUG_ON(vma->size >= obj->base.size); 112 GEM_BUG_ON(vma->size > obj->base.size);
113 } else if (view->type == I915_GGTT_VIEW_ROTATED) { 113 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
114 vma->size = intel_rotation_info_size(&view->rotated); 114 vma->size = intel_rotation_info_size(&view->rotated);
115 vma->size <<= PAGE_SHIFT; 115 vma->size <<= PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index de0e22322c76..072b326d5ee0 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
304 int max_dotclk = dev_priv->max_dotclk_freq; 304 int max_dotclk = dev_priv->max_dotclk_freq;
305 int max_clock; 305 int max_clock;
306 306
307 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
308 return MODE_NO_DBLESCAN;
309
307 if (mode->clock < 25000) 310 if (mode->clock < 25000)
308 return MODE_CLOCK_LOW; 311 return MODE_CLOCK_LOW;
309 312
@@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
337 struct intel_crtc_state *pipe_config, 340 struct intel_crtc_state *pipe_config,
338 struct drm_connector_state *conn_state) 341 struct drm_connector_state *conn_state)
339{ 342{
343 struct drm_display_mode *adjusted_mode =
344 &pipe_config->base.adjusted_mode;
345
346 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
347 return false;
348
340 return true; 349 return true;
341} 350}
342 351
@@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
344 struct intel_crtc_state *pipe_config, 353 struct intel_crtc_state *pipe_config,
345 struct drm_connector_state *conn_state) 354 struct drm_connector_state *conn_state)
346{ 355{
356 struct drm_display_mode *adjusted_mode =
357 &pipe_config->base.adjusted_mode;
358
359 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
360 return false;
361
347 pipe_config->has_pch_encoder = true; 362 pipe_config->has_pch_encoder = true;
348 363
349 return true; 364 return true;
@@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
354 struct drm_connector_state *conn_state) 369 struct drm_connector_state *conn_state)
355{ 370{
356 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 371 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
372 struct drm_display_mode *adjusted_mode =
373 &pipe_config->base.adjusted_mode;
374
375 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
376 return false;
357 377
358 pipe_config->has_pch_encoder = true; 378 pipe_config->has_pch_encoder = true;
359 379
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dee3a8e659f1..2cc6faa1daa8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14469,12 +14469,22 @@ static enum drm_mode_status
14469intel_mode_valid(struct drm_device *dev, 14469intel_mode_valid(struct drm_device *dev,
14470 const struct drm_display_mode *mode) 14470 const struct drm_display_mode *mode)
14471{ 14471{
14472 /*
14473 * Can't reject DBLSCAN here because Xorg ddxen can add piles
14474 * of DBLSCAN modes to the output's mode list when they detect
14475 * the scaling mode property on the connector. And they don't
14476 * ask the kernel to validate those modes in any way until
14477 * modeset time at which point the client gets a protocol error.
14478 * So in order to not upset those clients we silently ignore the
14479 * DBLSCAN flag on such connectors. For other connectors we will
14480 * reject modes with the DBLSCAN flag in encoder->compute_config().
14481 * And we always reject DBLSCAN modes in connector->mode_valid()
14482 * as we never want such modes on the connector's mode list.
14483 */
14484
14472 if (mode->vscan > 1) 14485 if (mode->vscan > 1)
14473 return MODE_NO_VSCAN; 14486 return MODE_NO_VSCAN;
14474 14487
14475 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
14476 return MODE_NO_DBLESCAN;
14477
14478 if (mode->flags & DRM_MODE_FLAG_HSKEW) 14488 if (mode->flags & DRM_MODE_FLAG_HSKEW)
14479 return MODE_H_ILLEGAL; 14489 return MODE_H_ILLEGAL;
14480 14490
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8320f0e8e3be..16faea30114a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -420,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
420 int max_rate, mode_rate, max_lanes, max_link_clock; 420 int max_rate, mode_rate, max_lanes, max_link_clock;
421 int max_dotclk; 421 int max_dotclk;
422 422
423 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
424 return MODE_NO_DBLESCAN;
425
423 max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); 426 max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
424 427
425 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 428 if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -1862,7 +1865,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1862 conn_state->scaling_mode); 1865 conn_state->scaling_mode);
1863 } 1866 }
1864 1867
1865 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 1868 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1869 return false;
1870
1871 if (HAS_GMCH_DISPLAY(dev_priv) &&
1866 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 1872 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
1867 return false; 1873 return false;
1868 1874
@@ -2784,16 +2790,6 @@ static void g4x_disable_dp(struct intel_encoder *encoder,
2784 const struct drm_connector_state *old_conn_state) 2790 const struct drm_connector_state *old_conn_state)
2785{ 2791{
2786 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2792 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
2787
2788 /* disable the port before the pipe on g4x */
2789 intel_dp_link_down(encoder, old_crtc_state);
2790}
2791
2792static void ilk_disable_dp(struct intel_encoder *encoder,
2793 const struct intel_crtc_state *old_crtc_state,
2794 const struct drm_connector_state *old_conn_state)
2795{
2796 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
2797} 2793}
2798 2794
2799static void vlv_disable_dp(struct intel_encoder *encoder, 2795static void vlv_disable_dp(struct intel_encoder *encoder,
@@ -2807,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
2807 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2803 intel_disable_dp(encoder, old_crtc_state, old_conn_state);
2808} 2804}
2809 2805
2810static void ilk_post_disable_dp(struct intel_encoder *encoder, 2806static void g4x_post_disable_dp(struct intel_encoder *encoder,
2811 const struct intel_crtc_state *old_crtc_state, 2807 const struct intel_crtc_state *old_crtc_state,
2812 const struct drm_connector_state *old_conn_state) 2808 const struct drm_connector_state *old_conn_state)
2813{ 2809{
2814 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2810 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2815 enum port port = encoder->port; 2811 enum port port = encoder->port;
2816 2812
2813 /*
2814 * Bspec does not list a specific disable sequence for g4x DP.
2815 * Follow the ilk+ sequence (disable pipe before the port) for
2816 * g4x DP as it does not suffer from underruns like the normal
2817 * g4x modeset sequence (disable pipe after the port).
2818 */
2817 intel_dp_link_down(encoder, old_crtc_state); 2819 intel_dp_link_down(encoder, old_crtc_state);
2818 2820
2819 /* Only ilk+ has port A */ 2821 /* Only ilk+ has port A */
@@ -6337,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6337 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 6339 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6338 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 6340 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6339 6341
6340 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 6342 if (!HAS_GMCH_DISPLAY(dev_priv))
6341 connector->interlace_allowed = true; 6343 connector->interlace_allowed = true;
6342 connector->doublescan_allowed = 0; 6344 connector->doublescan_allowed = 0;
6343 6345
@@ -6436,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
6436 intel_encoder->enable = vlv_enable_dp; 6438 intel_encoder->enable = vlv_enable_dp;
6437 intel_encoder->disable = vlv_disable_dp; 6439 intel_encoder->disable = vlv_disable_dp;
6438 intel_encoder->post_disable = vlv_post_disable_dp; 6440 intel_encoder->post_disable = vlv_post_disable_dp;
6439 } else if (INTEL_GEN(dev_priv) >= 5) {
6440 intel_encoder->pre_enable = g4x_pre_enable_dp;
6441 intel_encoder->enable = g4x_enable_dp;
6442 intel_encoder->disable = ilk_disable_dp;
6443 intel_encoder->post_disable = ilk_post_disable_dp;
6444 } else { 6441 } else {
6445 intel_encoder->pre_enable = g4x_pre_enable_dp; 6442 intel_encoder->pre_enable = g4x_pre_enable_dp;
6446 intel_encoder->enable = g4x_enable_dp; 6443 intel_encoder->enable = g4x_enable_dp;
6447 intel_encoder->disable = g4x_disable_dp; 6444 intel_encoder->disable = g4x_disable_dp;
6445 intel_encoder->post_disable = g4x_post_disable_dp;
6448 } 6446 }
6449 6447
6450 intel_dig_port->dp.output_reg = output_reg; 6448 intel_dig_port->dp.output_reg = output_reg;
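
The net effect of the intel_dp.c hunks is that the separate ilk_disable_dp/ilk_post_disable_dp hooks disappear and pre-ilk (g4x) hardware is switched to the ilk+ ordering, with the port taken down in post_disable once the pipe is off. A sketch of the hook-table collapse, with hypothetical names and only the two callbacks that changed:

#include <stdio.h>

struct encoder_hooks {
	void (*disable)(void);
	void (*post_disable)(void);
};

static void dp_disable(void)      { puts("disable audio/sink, pipe still on"); }
static void dp_post_disable(void) { puts("pipe is off, now take the link down"); }

/* Before: a gen >= 5 branch used ilk_* hooks while the g4x fallback tore
 * the port down too early, inside disable(). After: one branch serves both. */
static void init_hooks(struct encoder_hooks *h)
{
	h->disable = dp_disable;
	h->post_disable = dp_post_disable;	/* link down moved here */
}

int main(void)
{
	struct encoder_hooks h;

	init_hooks(&h);
	h.disable();
	h.post_disable();
	return 0;
}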
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9e6956c08688..5890500a3a8b 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
48 bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, 48 bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
49 DP_DPCD_QUIRK_LIMITED_M_N); 49 DP_DPCD_QUIRK_LIMITED_M_N);
50 50
51 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
52 return false;
53
51 pipe_config->has_pch_encoder = false; 54 pipe_config->has_pch_encoder = false;
52 bpp = 24; 55 bpp = 24;
53 if (intel_dp->compliance.test_data.bpc) { 56 if (intel_dp->compliance.test_data.bpc) {
@@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
366 if (!intel_dp) 369 if (!intel_dp)
367 return MODE_ERROR; 370 return MODE_ERROR;
368 371
372 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
373 return MODE_NO_DBLESCAN;
374
369 max_link_clock = intel_dp_max_link_rate(intel_dp); 375 max_link_clock = intel_dp_max_link_rate(intel_dp);
370 max_lanes = intel_dp_max_lane_count(intel_dp); 376 max_lanes = intel_dp_max_lane_count(intel_dp);
371 377
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index cf39ca90d887..f349b3920199 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
326 conn_state->scaling_mode); 326 conn_state->scaling_mode);
327 } 327 }
328 328
329 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
330 return false;
331
329 /* DSI uses short packets for sync events, so clear mode flags for DSI */ 332 /* DSI uses short packets for sync events, so clear mode flags for DSI */
330 adjusted_mode->flags = 0; 333 adjusted_mode->flags = 0;
331 334
@@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
1266 1269
1267 DRM_DEBUG_KMS("\n"); 1270 DRM_DEBUG_KMS("\n");
1268 1271
1272 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1273 return MODE_NO_DBLESCAN;
1274
1269 if (fixed_mode) { 1275 if (fixed_mode) {
1270 if (mode->hdisplay > fixed_mode->hdisplay) 1276 if (mode->hdisplay > fixed_mode->hdisplay)
1271 return MODE_PANEL; 1277 return MODE_PANEL;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a70d767313aa..61d908e0df0e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
219 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 219 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
220 int target_clock = mode->clock; 220 int target_clock = mode->clock;
221 221
222 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
223 return MODE_NO_DBLESCAN;
224
222 /* XXX: Validate clock range */ 225 /* XXX: Validate clock range */
223 226
224 if (fixed_mode) { 227 if (fixed_mode) {
@@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
254 if (fixed_mode) 257 if (fixed_mode)
255 intel_fixed_panel_mode(fixed_mode, adjusted_mode); 258 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
256 259
260 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
261 return false;
262
257 return true; 263 return true;
258} 264}
259 265
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ee929f31f7db..d8cb53ef4351 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
1557 bool force_dvi = 1557 bool force_dvi =
1558 READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI; 1558 READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
1559 1559
1560 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1561 return MODE_NO_DBLESCAN;
1562
1560 clock = mode->clock; 1563 clock = mode->clock;
1561 1564
1562 if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) 1565 if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
1677 int desired_bpp; 1680 int desired_bpp;
1678 bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; 1681 bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
1679 1682
1683 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1684 return false;
1685
1680 pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; 1686 pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
1681 1687
1682 if (pipe_config->has_hdmi_sink) 1688 if (pipe_config->has_hdmi_sink)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 15434cad5430..7c4c8fb1dae4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1545,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1545 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ 1545 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
1546 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 1546 batch = gen8_emit_flush_coherentl3_wa(engine, batch);
1547 1547
1548 *batch++ = MI_LOAD_REGISTER_IMM(3);
1549
1548 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ 1550 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
1549 *batch++ = MI_LOAD_REGISTER_IMM(1);
1550 *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2); 1551 *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
1551 *batch++ = _MASKED_BIT_DISABLE( 1552 *batch++ = _MASKED_BIT_DISABLE(
1552 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE); 1553 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
1554
1555 /* BSpec: 11391 */
1556 *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
1557 *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
1558
1559 /* BSpec: 11299 */
1560 *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
1561 *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
1562
1553 *batch++ = MI_NOOP; 1563 *batch++ = MI_NOOP;
1554 1564
1555 /* WaClearSlmSpaceAtContextSwitch:kbl */ 1565 /* WaClearSlmSpaceAtContextSwitch:kbl */
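
The count passed to MI_LOAD_REGISTER_IMM() is encoded in the header dword and tells the command streamer how many register/value pairs follow, so adding the two provoking-vertex workaround writes requires bumping it from 1 to 3 as the hunk above does. A self-contained sketch of that encoding; the opcode layout mirrors the i915 definition, but the register offsets below are placeholders:

#include <stdint.h>
#include <stdio.h>

/* MI_INSTR-style encoding: opcode 0x22, flags = 2*n - 1 dwords to follow */
#define MI_LOAD_REGISTER_IMM(n)	((0x22u << 23) | (2 * (n) - 1))

static uint32_t *emit_lri(uint32_t *batch, const uint32_t pairs[][2], int n)
{
	*batch++ = MI_LOAD_REGISTER_IMM(n);	/* one header for all n pairs */
	for (int i = 0; i < n; i++) {
		*batch++ = pairs[i][0];		/* register offset */
		*batch++ = pairs[i][1];		/* (masked) value */
	}
	return batch;
}

int main(void)
{
	uint32_t batch[8];
	const uint32_t wa[3][2] = {
		{ 0x7014, 0x00010001 },	/* placeholder chicken registers */
		{ 0x5768, 0x00020002 },
		{ 0xe184, 0x00040004 },
	};
	uint32_t *end = emit_lri(batch, wa, 3);

	printf("%td dwords emitted\n", end - batch);	/* 7 */
	return 0;
}

With the old MI_LOAD_REGISTER_IMM(1) header, the streamer would have parsed the second and third pairs as new commands, which is exactly the bug the hunk fixes.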
@@ -2641,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2641 context_size += LRC_HEADER_PAGES * PAGE_SIZE; 2651 context_size += LRC_HEADER_PAGES * PAGE_SIZE;
2642 2652
2643 ctx_obj = i915_gem_object_create(ctx->i915, context_size); 2653 ctx_obj = i915_gem_object_create(ctx->i915, context_size);
2644 if (IS_ERR(ctx_obj)) { 2654 if (IS_ERR(ctx_obj))
2645 ret = PTR_ERR(ctx_obj); 2655 return PTR_ERR(ctx_obj);
2646 goto error_deref_obj;
2647 }
2648 2656
2649 vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL); 2657 vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
2650 if (IS_ERR(vma)) { 2658 if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d278f24ba6ae..48f618dc9abb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
380 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 380 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
381 int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; 381 int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
382 382
383 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
384 return MODE_NO_DBLESCAN;
383 if (mode->hdisplay > fixed_mode->hdisplay) 385 if (mode->hdisplay > fixed_mode->hdisplay)
384 return MODE_PANEL; 386 return MODE_PANEL;
385 if (mode->vdisplay > fixed_mode->vdisplay) 387 if (mode->vdisplay > fixed_mode->vdisplay)
@@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
429 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 431 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
430 adjusted_mode); 432 adjusted_mode);
431 433
434 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
435 return false;
436
432 if (HAS_PCH_SPLIT(dev_priv)) { 437 if (HAS_PCH_SPLIT(dev_priv)) {
433 pipe_config->has_pch_encoder = true; 438 pipe_config->has_pch_encoder = true;
434 439
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 25005023c243..26975df4e593 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1160 adjusted_mode); 1160 adjusted_mode);
1161 } 1161 }
1162 1162
1163 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1164 return false;
1165
1163 /* 1166 /*
1164 * Make the CRTC code factor in the SDVO pixel multiplier. The 1167 * Make the CRTC code factor in the SDVO pixel multiplier. The
1165 * SDVO device will factor out the multiplier during mode_set. 1168 * SDVO device will factor out the multiplier during mode_set.
@@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
1621 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1624 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1622 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 1625 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
1623 1626
1627 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1628 return MODE_NO_DBLESCAN;
1629
1624 if (intel_sdvo->pixel_clock_min > mode->clock) 1630 if (intel_sdvo->pixel_clock_min > mode->clock)
1625 return MODE_CLOCK_LOW; 1631 return MODE_CLOCK_LOW;
1626 1632
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 885fc3809f7f..b55b5c157e38 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
850 const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); 850 const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
851 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 851 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
852 852
853 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
854 return MODE_NO_DBLESCAN;
855
853 if (mode->clock > max_dotclk) 856 if (mode->clock > max_dotclk)
854 return MODE_CLOCK_HIGH; 857 return MODE_CLOCK_HIGH;
855 858
@@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
877 struct drm_connector_state *conn_state) 880 struct drm_connector_state *conn_state)
878{ 881{
879 const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); 882 const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
883 struct drm_display_mode *adjusted_mode =
884 &pipe_config->base.adjusted_mode;
880 885
881 if (!tv_mode) 886 if (!tv_mode)
882 return false; 887 return false;
883 888
884 pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock; 889 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
890 return false;
891
892 adjusted_mode->crtc_clock = tv_mode->clock;
885 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 893 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
886 pipe_config->pipe_bpp = 8*3; 894 pipe_config->pipe_bpp = 8*3;
887 895
888 /* TV has it's own notion of sync and other mode flags, so clear them. */ 896 /* TV has it's own notion of sync and other mode flags, so clear them. */
889 pipe_config->base.adjusted_mode.flags = 0; 897 adjusted_mode->flags = 0;
890 898
891 /* 899 /*
892 * FIXME: We don't check whether the input mode is actually what we want 900 * FIXME: We don't check whether the input mode is actually what we want
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 32b1a6cdecfc..d3443125e661 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -197,8 +197,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
197 priv->io_base = regs; 197 priv->io_base = regs;
198 198
199 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi"); 199 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
200 if (!res) 200 if (!res) {
201 return -EINVAL; 201 ret = -EINVAL;
202 goto free_drm;
203 }
202 /* Simply ioremap since it may be a shared register zone */ 204 /* Simply ioremap since it may be a shared register zone */
203 regs = devm_ioremap(dev, res->start, resource_size(res)); 205 regs = devm_ioremap(dev, res->start, resource_size(res));
204 if (!regs) { 206 if (!regs) {
@@ -215,8 +217,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
215 } 217 }
216 218
217 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc"); 219 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
218 if (!res) 220 if (!res) {
219 return -EINVAL; 221 ret = -EINVAL;
222 goto free_drm;
223 }
220 /* Simply ioremap since it may be a shared register zone */ 224 /* Simply ioremap since it may be a shared register zone */
221 regs = devm_ioremap(dev, res->start, resource_size(res)); 225 regs = devm_ioremap(dev, res->start, resource_size(res));
222 if (!regs) { 226 if (!regs) {
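
Both meson hunks fix the same leak: once drm_dev_alloc() has succeeded, a bare return on a missing resource drops the drm device reference on the floor, so the error paths are rerouted through the existing free_drm label. The unwind shape, as a self-contained sketch with malloc/free standing in for drm_dev_alloc/drm_dev_unref:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int bind_master(int have_resource)
{
	void *drm = malloc(64);		/* stands in for drm_dev_alloc() */
	int ret;

	if (!drm)
		return -ENOMEM;

	if (!have_resource) {		/* e.g. the "hhi" or "dmc" region */
		ret = -EINVAL;
		goto free_drm;		/* the fix: unwind instead of return */
	}

	return 0;	/* device registered; freed at unbind (not modeled) */

free_drm:
	free(drm);	/* stands in for drm_dev_unref() */
	return ret;
}

int main(void)
{
	printf("missing resource -> %d\n", bind_master(0));	/* -22 */
	return 0;
}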
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 291c08117ab6..397143b639c6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
132 132
133 nvif_object_map(&wndw->wimm.base.user, NULL, 0); 133 nvif_object_map(&wndw->wimm.base.user, NULL, 0);
134 wndw->immd = func; 134 wndw->immd = func;
135 wndw->ctxdma.parent = &disp->core->chan.base.user; 135 wndw->ctxdma.parent = NULL;
136 return 0; 136 return 0;
137} 137}
138 138
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 224963b533a6..c5a9bc1af5af 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
444 if (ret) 444 if (ret)
445 return ret; 445 return ret;
446 446
447 ctxdma = nv50_wndw_ctxdma_new(wndw, fb); 447 if (wndw->ctxdma.parent) {
448 if (IS_ERR(ctxdma)) { 448 ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
449 nouveau_bo_unpin(fb->nvbo); 449 if (IS_ERR(ctxdma)) {
450 return PTR_ERR(ctxdma); 450 nouveau_bo_unpin(fb->nvbo);
451 return PTR_ERR(ctxdma);
452 }
453
454 asyw->image.handle[0] = ctxdma->object.handle;
451 } 455 }
452 456
453 asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv); 457 asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
454 asyw->image.handle[0] = ctxdma->object.handle;
455 asyw->image.offset[0] = fb->nvbo->bo.offset; 458 asyw->image.offset[0] = fb->nvbo->bo.offset;
456 459
457 if (wndw->func->prepare) { 460 if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index b8cda9449241..768207fbbae3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
623 struct qxl_cursor_cmd *cmd; 623 struct qxl_cursor_cmd *cmd;
624 struct qxl_cursor *cursor; 624 struct qxl_cursor *cursor;
625 struct drm_gem_object *obj; 625 struct drm_gem_object *obj;
626 struct qxl_bo *cursor_bo = NULL, *user_bo = NULL; 626 struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
627 int ret; 627 int ret;
628 void *user_ptr; 628 void *user_ptr;
629 int size = 64*64*4; 629 int size = 64*64*4;
@@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
677 cursor_bo, 0); 677 cursor_bo, 0);
678 cmd->type = QXL_CURSOR_SET; 678 cmd->type = QXL_CURSOR_SET;
679 679
680 qxl_bo_unref(&qcrtc->cursor_bo); 680 old_cursor_bo = qcrtc->cursor_bo;
681 qcrtc->cursor_bo = cursor_bo; 681 qcrtc->cursor_bo = cursor_bo;
682 cursor_bo = NULL; 682 cursor_bo = NULL;
683 } else { 683 } else {
@@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
697 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 697 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
698 qxl_release_fence_buffer_objects(release); 698 qxl_release_fence_buffer_objects(release);
699 699
700 if (old_cursor_bo)
701 qxl_bo_unref(&old_cursor_bo);
702
700 qxl_bo_unref(&cursor_bo); 703 qxl_bo_unref(&cursor_bo);
701 704
702 return; 705 return;
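
The qxl change is a lifetime fix: the old cursor BO used to be unreferenced while the just-queued QXL_CURSOR_SET command could still be reading it; now the reference is held in old_cursor_bo until the command and its release have been pushed. A refcount sketch of the reordering, with toy types rather than the real qxl objects:

#include <stdio.h>
#include <stdlib.h>

struct bo { int refs; };

static struct bo *bo_new(void)
{
	struct bo *b = calloc(1, sizeof(*b));

	if (b)
		b->refs = 1;
	return b;
}

static void bo_unref(struct bo **b)
{
	if (*b && --(*b)->refs == 0)
		free(*b);
	*b = NULL;
}

static void cursor_update(struct bo **crtc_cursor, struct bo *new_bo)
{
	struct bo *old_cursor_bo = *crtc_cursor;	/* was: unref right here */

	*crtc_cursor = new_bo;
	/* ... emit QXL_CURSOR_SET, push the ring, fence buffer objects ... */
	bo_unref(&old_cursor_bo);	/* safe only after the push */
}

int main(void)
{
	struct bo *a = bo_new(), *b = bo_new(), *cur = a;

	cursor_update(&cur, b);
	bo_unref(&cur);
	return 0;
}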
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 08747fc3ee71..8232b39e16ca 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -17,7 +17,6 @@
17#include <drm/drm_encoder.h> 17#include <drm/drm_encoder.h>
18#include <drm/drm_modes.h> 18#include <drm/drm_modes.h>
19#include <drm/drm_of.h> 19#include <drm/drm_of.h>
20#include <drm/drm_panel.h>
21 20
22#include <uapi/drm/drm_mode.h> 21#include <uapi/drm/drm_mode.h>
23 22
@@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
418static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, 417static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
419 const struct drm_display_mode *mode) 418 const struct drm_display_mode *mode)
420{ 419{
421 struct drm_panel *panel = tcon->panel;
422 struct drm_connector *connector = panel->connector;
423 struct drm_display_info display_info = connector->display_info;
424 unsigned int bp, hsync, vsync; 420 unsigned int bp, hsync, vsync;
425 u8 clk_delay; 421 u8 clk_delay;
426 u32 val = 0; 422 u32 val = 0;
@@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
478 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 474 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
479 val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; 475 val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
480 476
481 /*
482 * On A20 and similar SoCs, the only way to achieve Positive Edge
483 * (Rising Edge), is setting dclk clock phase to 2/3(240°).
484 * By default TCON works in Negative Edge(Falling Edge),
485 * this is why phase is set to 0 in that case.
486 * Unfortunately there's no way to logically invert dclk through
487 * IO_POL register.
488 * The only acceptable way to work, triple checked with scope,
489 * is using clock phase set to 0° for Negative Edge and set to 240°
490 * for Positive Edge.
491 * On A33 and similar SoCs there would be a 90° phase option,
492 * but it divides also dclk by 2.
493 * Following code is a way to avoid quirks all around TCON
494 * and DOTCLOCK drivers.
495 */
496 if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
497 clk_set_phase(tcon->dclk, 240);
498
499 if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
500 clk_set_phase(tcon->dclk, 0);
501
502 regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, 477 regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
503 SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE, 478 SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
504 val); 479 val);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 2ebdc6d5a76e..d5583190f3e4 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
137 137
138 if (cmd > (char *) urb->transfer_buffer) { 138 if (cmd > (char *) urb->transfer_buffer) {
139 /* Send partial buffer remaining before exiting */ 139 /* Send partial buffer remaining before exiting */
140 int len = cmd - (char *) urb->transfer_buffer; 140 int len;
141 if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
142 *cmd++ = 0xAF;
143 len = cmd - (char *) urb->transfer_buffer;
141 ret = udl_submit_urb(dev, urb, len); 144 ret = udl_submit_urb(dev, urb, len);
142 bytes_sent += len; 145 bytes_sent += len;
143 } else 146 } else
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 0c87b1ac6b68..b992644c17e6 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -153,11 +153,11 @@ static void udl_compress_hline16(
153 raw_pixels_count_byte = cmd++; /* we'll know this later */ 153 raw_pixels_count_byte = cmd++; /* we'll know this later */
154 raw_pixel_start = pixel; 154 raw_pixel_start = pixel;
155 155
156 cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, 156 cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
157 min((int)(pixel_end - pixel) / bpp, 157 (unsigned long)(pixel_end - pixel) / bpp,
158 (int)(cmd_buffer_end - cmd) / 2))) * bpp; 158 (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
159 159
160 prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); 160 prefetch_range((void *) pixel, cmd_pixel_end - pixel);
161 pixel_val16 = get_pixel_val16(pixel, bpp); 161 pixel_val16 = get_pixel_val16(pixel, bpp);
162 162
163 while (pixel < cmd_pixel_end) { 163 while (pixel < cmd_pixel_end) {
@@ -193,6 +193,9 @@ static void udl_compress_hline16(
193 if (pixel > raw_pixel_start) { 193 if (pixel > raw_pixel_start) {
194 /* finalize last RAW span */ 194 /* finalize last RAW span */
195 *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; 195 *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
196 } else {
197 /* undo unused byte */
198 cmd--;
196 } 199 }
197 200
198 *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; 201 *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
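
The corrected clamp bounds each compressed command by three limits at once: the protocol's per-command pixel cap, the input pixels left on the line, and the output bytes left in the URB, with one byte held back for the 0xAF end-of-command marker that the udl_fb.c hunk above now appends. A sketch of the arithmetic, where bpp is input bytes per pixel and each emitted pixel costs two output bytes:

#include <stdio.h>

#define MAX_CMD_PIXELS 255

static unsigned long min3ul(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;

	return m < c ? m : c;
}

/* pixels one command may carry: protocol cap, input left, output left
 * (minus the byte reserved for the 0xAF trailer) */
static unsigned long cmd_pixels(unsigned long in_left_bytes,
				unsigned long out_left_bytes,
				unsigned long bpp)
{
	return min3ul(MAX_CMD_PIXELS + 1UL,
		      in_left_bytes / bpp,
		      (out_left_bytes - 1) / 2);
}

int main(void)
{
	printf("%lu\n", cmd_pixels(1024, 33, 2));	/* output-limited: 16 */
	return 0;
}

The old expression used int arithmetic and forgot the reserved byte, which is how a full buffer could overrun by the trailer write.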
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f858cc72011d..3942ee61bd1c 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1952,6 +1952,8 @@ static int hid_device_probe(struct device *dev)
1952 } 1952 }
1953 hdev->io_started = false; 1953 hdev->io_started = false;
1954 1954
1955 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
1956
1955 if (!hdev->driver) { 1957 if (!hdev->driver) {
1956 id = hid_match_device(hdev, hdrv); 1958 id = hid_match_device(hdev, hdrv);
1957 if (id == NULL) { 1959 if (id == NULL) {
@@ -2215,7 +2217,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
2215 struct hid_device *hdev = to_hid_device(dev); 2217 struct hid_device *hdev = to_hid_device(dev);
2216 2218
2217 if (hdev->driver == hdrv && 2219 if (hdev->driver == hdrv &&
2218 !hdrv->match(hdev, hid_ignore_special_drivers)) 2220 !hdrv->match(hdev, hid_ignore_special_drivers) &&
2221 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
2219 return device_reprobe(dev); 2222 return device_reprobe(dev);
2220 2223
2221 return 0; 2224 return 0;
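
The hid-core pair works as a handshake: __hid_bus_reprobe_drivers() atomically sets a REPROBED status bit before reprobing, so each device is queued at most once, and hid_device_probe() clears it again when a probe actually runs. A toy model of the test_and_set_bit() guard; the stand-ins below are single-threaded, whereas the kernel bitops are atomic:

#include <stdio.h>

static int test_and_set_bit(int nr, unsigned long *word)
{
	int old = (*word >> nr) & 1;

	*word |= 1UL << nr;
	return old;
}

static void clear_bit(int nr, unsigned long *word)
{
	*word &= ~(1UL << nr);
}

int main(void)
{
	unsigned long status = 0;
	int nr = 1;	/* illustrative bit index for the flag */

	if (!test_and_set_bit(nr, &status))
		puts("reprobe queued");
	if (test_and_set_bit(nr, &status))
		puts("second attempt skipped");	/* the point of the guard */
	clear_bit(nr, &status);	/* done by hid_device_probe() on entry */
	return 0;
}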
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 8469b6964ff6..b48100236df8 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1154,6 +1154,8 @@ copy_rest:
1154 goto out; 1154 goto out;
1155 if (list->tail > list->head) { 1155 if (list->tail > list->head) {
1156 len = list->tail - list->head; 1156 len = list->tail - list->head;
1157 if (len > count)
1158 len = count;
1157 1159
1158 if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { 1160 if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
1159 ret = -EFAULT; 1161 ret = -EFAULT;
@@ -1163,6 +1165,8 @@ copy_rest:
1163 list->head += len; 1165 list->head += len;
1164 } else { 1166 } else {
1165 len = HID_DEBUG_BUFSIZE - list->head; 1167 len = HID_DEBUG_BUFSIZE - list->head;
1168 if (len > count)
1169 len = count;
1166 1170
1167 if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { 1171 if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
1168 ret = -EFAULT; 1172 ret = -EFAULT;
@@ -1170,7 +1174,9 @@ copy_rest:
1170 } 1174 }
1171 list->head = 0; 1175 list->head = 0;
1172 ret += len; 1176 ret += len;
1173 goto copy_rest; 1177 count -= len;
1178 if (count > 0)
1179 goto copy_rest;
1174 } 1180 }
1175 1181
1176 } 1182 }
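
The hid-debug hunks make the read side of the event ring honor the caller's buffer: each chunk is clamped to the bytes requested, and after a wrap the loop only continues while count remains instead of unconditionally jumping back to copy_rest. A self-contained userspace sketch of the same circular-buffer read, with memcpy standing in for copy_to_user:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define BUFSZ 8

static size_t ring_read(char *dst, size_t count,
			const char *buf, size_t *head, size_t tail)
{
	size_t ret = 0, len;

	while (count > 0 && *head != tail) {
		if (tail > *head)
			len = tail - *head;
		else
			len = BUFSZ - *head;	/* up to the wrap point */
		if (len > count)
			len = count;		/* the missing clamp */

		memcpy(dst + ret, buf + *head, len);
		*head = (*head + len) % BUFSZ;
		ret += len;
		count -= len;
	}
	return ret;
}

int main(void)
{
	char buf[BUFSZ] = "ABCDEFGH", out[4];
	size_t head = 6;	/* producer has wrapped */
	size_t n = ring_read(out, sizeof(out), buf, &head, 3);

	printf("%zu bytes: %.4s\n", n, out);	/* 4 bytes: GHAB */
	return 0;
}

Without the clamp, a small user buffer received a full buffer-sized copy, which is the overflow the patch closes.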
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 7b8e17b03cb8..6bf4da7ad63a 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = {
124 USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) }, 124 USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
125 { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, 125 { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
126 USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) }, 126 USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
127 { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
128 USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) },
127 { } 129 { }
128}; 130};
129MODULE_DEVICE_TABLE(hid, hammer_devices); 131MODULE_DEVICE_TABLE(hid, hammer_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index a85634fe033f..c7981ddd8776 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -452,6 +452,7 @@
452#define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE 0x5028 452#define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE 0x5028
453#define USB_DEVICE_ID_GOOGLE_STAFF 0x502b 453#define USB_DEVICE_ID_GOOGLE_STAFF 0x502b
454#define USB_DEVICE_ID_GOOGLE_WAND 0x502d 454#define USB_DEVICE_ID_GOOGLE_WAND 0x502d
455#define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030
455 456
456#define USB_VENDOR_ID_GOTOP 0x08f2 457#define USB_VENDOR_ID_GOTOP 0x08f2
457#define USB_DEVICE_ID_SUPER_Q2 0x007f 458#define USB_DEVICE_ID_SUPER_Q2 0x007f
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index cb86cc834201..0422ec2b13d2 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -573,7 +573,7 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
573 573
574static int steam_client_ll_parse(struct hid_device *hdev) 574static int steam_client_ll_parse(struct hid_device *hdev)
575{ 575{
576 struct steam_device *steam = hid_get_drvdata(hdev); 576 struct steam_device *steam = hdev->driver_data;
577 577
578 return hid_parse_report(hdev, steam->hdev->dev_rdesc, 578 return hid_parse_report(hdev, steam->hdev->dev_rdesc,
579 steam->hdev->dev_rsize); 579 steam->hdev->dev_rsize);
@@ -590,7 +590,7 @@ static void steam_client_ll_stop(struct hid_device *hdev)
590 590
591static int steam_client_ll_open(struct hid_device *hdev) 591static int steam_client_ll_open(struct hid_device *hdev)
592{ 592{
593 struct steam_device *steam = hid_get_drvdata(hdev); 593 struct steam_device *steam = hdev->driver_data;
594 int ret; 594 int ret;
595 595
596 ret = hid_hw_open(steam->hdev); 596 ret = hid_hw_open(steam->hdev);
@@ -605,7 +605,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
605 605
606static void steam_client_ll_close(struct hid_device *hdev) 606static void steam_client_ll_close(struct hid_device *hdev)
607{ 607{
608 struct steam_device *steam = hid_get_drvdata(hdev); 608 struct steam_device *steam = hdev->driver_data;
609 609
610 mutex_lock(&steam->mutex); 610 mutex_lock(&steam->mutex);
611 steam->client_opened = false; 611 steam->client_opened = false;
@@ -623,7 +623,7 @@ static int steam_client_ll_raw_request(struct hid_device *hdev,
623 size_t count, unsigned char report_type, 623 size_t count, unsigned char report_type,
624 int reqtype) 624 int reqtype)
625{ 625{
626 struct steam_device *steam = hid_get_drvdata(hdev); 626 struct steam_device *steam = hdev->driver_data;
627 627
628 return hid_hw_raw_request(steam->hdev, reportnum, buf, count, 628 return hid_hw_raw_request(steam->hdev, reportnum, buf, count,
629 report_type, reqtype); 629 report_type, reqtype);
@@ -710,7 +710,7 @@ static int steam_probe(struct hid_device *hdev,
710 ret = PTR_ERR(steam->client_hdev); 710 ret = PTR_ERR(steam->client_hdev);
711 goto client_hdev_fail; 711 goto client_hdev_fail;
712 } 712 }
713 hid_set_drvdata(steam->client_hdev, steam); 713 steam->client_hdev->driver_data = steam;
714 714
715 /* 715 /*
716 * With the real steam controller interface, do not connect hidraw. 716 * With the real steam controller interface, do not connect hidraw.
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index c1652bb7bd15..eae0cb3ddec6 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
484 return; 484 return;
485 } 485 }
486 486
487 if ((ret_size > size) || (ret_size <= 2)) { 487 if ((ret_size > size) || (ret_size < 2)) {
488 dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", 488 dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
489 __func__, size, ret_size); 489 __func__, size, ret_size);
490 return; 490 return;
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 582e449be9fe..a2c53ea3b5ed 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
205 kfree(ishtp_dev); 205 kfree(ishtp_dev);
206} 206}
207 207
208#ifdef CONFIG_PM 208static struct device __maybe_unused *ish_resume_device;
209static struct device *ish_resume_device;
210 209
211/* 50ms to get resume response */ 210/* 50ms to get resume response */
212#define WAIT_FOR_RESUME_ACK_MS 50 211#define WAIT_FOR_RESUME_ACK_MS 50
@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
220 * in that case a simple resume message is enough, others we need 219 * in that case a simple resume message is enough, others we need
221 * a reset sequence. 220 * a reset sequence.
222 */ 221 */
223static void ish_resume_handler(struct work_struct *work) 222static void __maybe_unused ish_resume_handler(struct work_struct *work)
224{ 223{
225 struct pci_dev *pdev = to_pci_dev(ish_resume_device); 224 struct pci_dev *pdev = to_pci_dev(ish_resume_device);
226 struct ishtp_device *dev = pci_get_drvdata(pdev); 225 struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
262 * 261 *
263 * Return: 0 to the pm core 262 * Return: 0 to the pm core
264 */ 263 */
265static int ish_suspend(struct device *device) 264static int __maybe_unused ish_suspend(struct device *device)
266{ 265{
267 struct pci_dev *pdev = to_pci_dev(device); 266 struct pci_dev *pdev = to_pci_dev(device);
268 struct ishtp_device *dev = pci_get_drvdata(pdev); 267 struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
288 return 0; 287 return 0;
289} 288}
290 289
291static DECLARE_WORK(resume_work, ish_resume_handler); 290static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
292/** 291/**
293 * ish_resume() - ISH resume callback 292 * ish_resume() - ISH resume callback
294 * @device: device pointer 293 * @device: device pointer
@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
297 * 296 *
298 * Return: 0 to the pm core 297 * Return: 0 to the pm core
299 */ 298 */
300static int ish_resume(struct device *device) 299static int __maybe_unused ish_resume(struct device *device)
301{ 300{
302 struct pci_dev *pdev = to_pci_dev(device); 301 struct pci_dev *pdev = to_pci_dev(device);
303 struct ishtp_device *dev = pci_get_drvdata(pdev); 302 struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
311 return 0; 310 return 0;
312} 311}
313 312
314static const struct dev_pm_ops ish_pm_ops = { 313static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
315 .suspend = ish_suspend,
316 .resume = ish_resume,
317};
318#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
319#else
320#define ISHTP_ISH_PM_OPS NULL
321#endif /* CONFIG_PM */
322 314
323static struct pci_driver ish_driver = { 315static struct pci_driver ish_driver = {
324 .name = KBUILD_MODNAME, 316 .name = KBUILD_MODNAME,
325 .id_table = ish_pci_tbl, 317 .id_table = ish_pci_tbl,
326 .probe = ish_probe, 318 .probe = ish_probe,
327 .remove = ish_remove, 319 .remove = ish_remove,
328 .driver.pm = ISHTP_ISH_PM_OPS, 320 .driver.pm = &ish_pm_ops,
329}; 321};
330 322
331module_pci_driver(ish_driver); 323module_pci_driver(ish_driver);
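
The pci-ish rework swaps a hand-rolled #ifdef CONFIG_PM block for the SIMPLE_DEV_PM_OPS() helper plus __maybe_unused annotations, so the callbacks always compile and simply go unreferenced when sleep support is configured out. A simplified model of why that works; this expansion is illustrative only, the real kernel macro wires up more sleep hooks:

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

struct dev_pm_ops {
	int (*suspend)(void);
	int (*resume)(void);
};

/* Illustrative expansion: with sleep support off, the struct stops
 * referencing the callbacks instead of hiding them behind #ifdef. */
#ifdef CONFIG_PM_SLEEP
#define SET_SLEEP_OPS(s, r)	.suspend = (s), .resume = (r),
#else
#define SET_SLEEP_OPS(s, r)
#endif
#define SIMPLE_DEV_PM_OPS(name, s, r) \
	const struct dev_pm_ops name = { SET_SLEEP_OPS(s, r) }

static int __maybe_unused ish_suspend(void) { return 0; }
static int __maybe_unused ish_resume(void)  { return 0; }

static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);

int main(void)
{
	printf("suspend hook %s\n",
	       ish_pm_ops.suspend ? "present" : "compiled out");
	return 0;
}

The payoff is less preprocessor surface and no dead #else branch defining ISHTP_ISH_PM_OPS to NULL.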
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e3ce233f8bdc..23872d08308c 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -36,6 +36,7 @@
36#include <linux/hiddev.h> 36#include <linux/hiddev.h>
37#include <linux/compat.h> 37#include <linux/compat.h>
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/nospec.h>
39#include "usbhid.h" 40#include "usbhid.h"
40 41
41#ifdef CONFIG_USB_DYNAMIC_MINORS 42#ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
469 470
470 if (uref->field_index >= report->maxfield) 471 if (uref->field_index >= report->maxfield)
471 goto inval; 472 goto inval;
473 uref->field_index = array_index_nospec(uref->field_index,
474 report->maxfield);
472 475
473 field = report->field[uref->field_index]; 476 field = report->field[uref->field_index];
474 if (uref->usage_index >= field->maxusage) 477 if (uref->usage_index >= field->maxusage)
475 goto inval; 478 goto inval;
479 uref->usage_index = array_index_nospec(uref->usage_index,
480 field->maxusage);
476 481
477 uref->usage_code = field->usage[uref->usage_index].hid; 482 uref->usage_code = field->usage[uref->usage_index].hid;
478 483
@@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
499 504
500 if (uref->field_index >= report->maxfield) 505 if (uref->field_index >= report->maxfield)
501 goto inval; 506 goto inval;
507 uref->field_index = array_index_nospec(uref->field_index,
508 report->maxfield);
502 509
503 field = report->field[uref->field_index]; 510 field = report->field[uref->field_index];
504 511
@@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
753 760
754 if (finfo.field_index >= report->maxfield) 761 if (finfo.field_index >= report->maxfield)
755 break; 762 break;
763 finfo.field_index = array_index_nospec(finfo.field_index,
764 report->maxfield);
756 765
757 field = report->field[finfo.field_index]; 766 field = report->field[finfo.field_index];
758 memset(&finfo, 0, sizeof(finfo)); 767 memset(&finfo, 0, sizeof(finfo));
@@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
797 806
798 if (cinfo.index >= hid->maxcollection) 807 if (cinfo.index >= hid->maxcollection)
799 break; 808 break;
809 cinfo.index = array_index_nospec(cinfo.index,
810 hid->maxcollection);
800 811
801 cinfo.type = hid->collection[cinfo.index].type; 812 cinfo.type = hid->collection[cinfo.index].type;
802 cinfo.usage = hid->collection[cinfo.index].usage; 813 cinfo.usage = hid->collection[cinfo.index].usage;
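
Each hiddev hunk follows the same Spectre-v1 recipe: after the bounds check, the user-controlled index is passed through array_index_nospec() so a speculatively executed path cannot use an out-of-range value to steer a dependent load. A userspace rendition of the kernel's generic branchless mask; architectures may override it with assembly, and it relies on arithmetic right shift of a negative value as GCC implements it:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

/* ~0UL when index < size, 0 otherwise, computed without a branch the
 * CPU could mispredict */
static unsigned long index_mask_nospec(unsigned long index,
				       unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

static unsigned long array_index_nospec(unsigned long index,
					unsigned long size)
{
	return index & index_mask_nospec(index, size);
}

int main(void)
{
	unsigned long maxfield = 8;
	unsigned long idx = 5;

	/* mirrors hiddev_ioctl_usage(): check, then clamp before use */
	if (idx >= maxfield)
		return 1;
	idx = array_index_nospec(idx, maxfield);
	printf("safe index: %lu\n", idx);
	return 0;
}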
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index c101369b51de..d6797535fff9 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
395 } 395 }
396 } 396 }
397 397
398 /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
399 if (hdev->vendor == USB_VENDOR_ID_WACOM &&
400 hdev->product == 0x0358 &&
401 WACOM_PEN_FIELD(field) &&
402 wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
403 field->logical_maximum = 43200;
404 }
405
398 switch (usage->hid) { 406 switch (usage->hid) {
399 case HID_GD_X: 407 case HID_GD_X:
400 features->x_max = field->logical_maximum; 408 features->x_max = field->logical_maximum;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0bb44d0088ed..ad7afa74d365 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
3365 if (features->type >= INTUOSHT && features->type <= BAMBOO_PT) 3365 if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
3366 features->device_type |= WACOM_DEVICETYPE_PAD; 3366 features->device_type |= WACOM_DEVICETYPE_PAD;
3367 3367
3368 features->x_max = 4096; 3368 if (features->type == INTUOSHT2) {
3369 features->y_max = 4096; 3369 features->x_max = features->x_max / 10;
3370 features->y_max = features->y_max / 10;
3371 }
3372 else {
3373 features->x_max = 4096;
3374 features->y_max = 4096;
3375 }
3370 } 3376 }
3371 else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { 3377 else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
3372 features->device_type |= WACOM_DEVICETYPE_PAD; 3378 features->device_type |= WACOM_DEVICETYPE_PAD;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index bf3bb7e1adab..9d3ef879dc51 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1074,6 +1074,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
1074 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"), 1074 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
1075 }, 1075 },
1076 }, 1076 },
1077 {
1078 .ident = "Dell XPS13 9333",
1079 .matches = {
1080 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1081 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
1082 },
1083 },
1077 { } 1084 { }
1078}; 1085};
1079 1086
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 155d4d1d1585..f9d1349c3286 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -4175,7 +4175,7 @@ static int nct6775_probe(struct platform_device *pdev)
4175 * The temperature is already monitored if the respective bit in <mask> 4175 * The temperature is already monitored if the respective bit in <mask>
4176 * is set. 4176 * is set.
4177 */ 4177 */
4178 for (i = 0; i < 32; i++) { 4178 for (i = 0; i < 31; i++) {
4179 if (!(data->temp_mask & BIT(i + 1))) 4179 if (!(data->temp_mask & BIT(i + 1)))
4180 continue; 4180 continue;
4181 if (!reg_temp_alternate[i]) 4181 if (!reg_temp_alternate[i])
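
The nct6775 loop reads source bits 1..31 of a 32-bit temperature mask via BIT(i + 1), so the old bound of 32 let the final iteration compute BIT(32), an undefined shift where long is 32 bits wide; stopping at i < 31 keeps i + 1 within bit 31, the top of the mask. A compact demonstration of the off-by-one:

#include <stdio.h>

#define BIT(n) (1UL << (n))	/* 32-bit wide UL on 32-bit systems */

int main(void)
{
	unsigned int temp_mask = 0x80000002u;	/* sources 1 and 31 set */

	/* Old bound: i < 32, so the last pass evaluated BIT(32).
	 * New bound: i + 1 never exceeds 31. */
	for (int i = 0; i < 31; i++) {
		if (!(temp_mask & BIT(i + 1)))
			continue;
		printf("temp source %d already monitored\n", i + 1);
	}
	return 0;
}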
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 4a34f311e1ff..6ec65adaba49 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -647,10 +647,10 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
647 if (bit_adap->getscl == NULL) 647 if (bit_adap->getscl == NULL)
648 adap->quirks = &i2c_bit_quirk_no_clk_stretch; 648 adap->quirks = &i2c_bit_quirk_no_clk_stretch;
649 649
650 /* Bring bus to a known state. Looks like STOP if bus is not free yet */ 650 /*
651 setscl(bit_adap, 1); 651 * We tried forcing SCL/SDA to an initial state here. But that caused a
652 udelay(bit_adap->udelay); 652 * regression, sadly. Check Bugzilla #200045 for details.
653 setsda(bit_adap, 1); 653 */
654 654
655 ret = add_adapter(adap); 655 ret = add_adapter(adap);
656 if (ret < 0) 656 if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 44cffad43701..c4d176f5ed79 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
234 .name = "cht_wc_ext_chrg_irq_chip", 234 .name = "cht_wc_ext_chrg_irq_chip",
235}; 235};
236 236
237static const char * const bq24190_suppliers[] = { "fusb302-typec-source" }; 237static const char * const bq24190_suppliers[] = {
238 "tcpm-source-psy-i2c-fusb302" };
238 239
239static const struct property_entry bq24190_props[] = { 240static const struct property_entry bq24190_props[] = {
240 PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers), 241 PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 005e6e0330c2..66f85bbf3591 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
279 * required for an I2C bus. 279 * required for an I2C bus.
280 */ 280 */
281 if (pdata->scl_is_open_drain) 281 if (pdata->scl_is_open_drain)
282 gflags = GPIOD_OUT_LOW; 282 gflags = GPIOD_OUT_HIGH;
283 else 283 else
284 gflags = GPIOD_OUT_LOW_OPEN_DRAIN; 284 gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
285 priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags); 285 priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
286 if (IS_ERR(priv->scl)) 286 if (IS_ERR(priv->scl))
287 return PTR_ERR(priv->scl); 287 return PTR_ERR(priv->scl);
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index e866c481bfc3..fce52bdab2b7 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -127,7 +127,7 @@ enum stu300_error {
127 127
128/* 128/*
129 * The number of address send athemps tried before giving up. 129 * The number of address send athemps tried before giving up.
130 * If the first one failes it seems like 5 to 8 attempts are required. 130 * If the first one fails it seems like 5 to 8 attempts are required.
131 */ 131 */
132#define NUM_ADDR_RESEND_ATTEMPTS 12 132#define NUM_ADDR_RESEND_ATTEMPTS 12
133 133
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 5fccd1f1bca8..797def5319f1 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -545,6 +545,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
545{ 545{
546 u32 cnfg; 546 u32 cnfg;
547 547
548 /*
549 * NACK interrupt is generated before the I2C controller generates
550 * the STOP condition on the bus. So wait for 2 clock periods
551 * before disabling the controller so that the STOP condition has
552 * been delivered properly.
553 */
554 udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
555
548 cnfg = i2c_readl(i2c_dev, I2C_CNFG); 556 cnfg = i2c_readl(i2c_dev, I2C_CNFG);
549 if (cnfg & I2C_CNFG_PACKET_MODE_EN) 557 if (cnfg & I2C_CNFG_PACKET_MODE_EN)
550 i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG); 558 i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
@@ -706,15 +714,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
706 if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) 714 if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
707 return 0; 715 return 0;
708 716
709 /*
710 * NACK interrupt is generated before the I2C controller generates
711 * the STOP condition on the bus. So wait for 2 clock periods
712 * before resetting the controller so that the STOP condition has
713 * been delivered properly.
714 */
715 if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
716 udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
717
718 tegra_i2c_init(i2c_dev); 717 tegra_i2c_init(i2c_dev);
719 if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { 718 if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
720 if (msg->flags & I2C_M_IGNORE_NAK) 719 if (msg->flags & I2C_M_IGNORE_NAK)
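
Moving the delay into tegra_i2c_disable_packet_mode() makes every disable wait out the NACK-to-STOP window, not just the explicit no-ACK error path it used to cover. The wait itself is two bus clock periods rounded up to whole microseconds:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int bus_clk_rate = 100000;	/* 100 kHz standard mode */

	/* 2 periods = 2 * 1e6 / rate microseconds; 20 us at 100 kHz,
	 * 5 us at 400 kHz, always rounded up so the STOP condition has
	 * reached the bus before the controller is disabled */
	unsigned int us = DIV_ROUND_UP(2 * 1000000, bus_clk_rate);

	printf("udelay(%u)\n", us);
	return 0;
}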
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 31d16ada6e7d..301285c54603 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
198 198
199 val = !val; 199 val = !val;
200 bri->set_scl(adap, val); 200 bri->set_scl(adap, val);
201 ndelay(RECOVERY_NDELAY); 201
202 /*
203 * If we can set SDA, we will always create STOP here to ensure
204 * the additional pulses will do no harm. This is achieved by
205 * letting SDA follow SCL half a cycle later.
206 */
207 ndelay(RECOVERY_NDELAY / 2);
208 if (bri->set_sda)
209 bri->set_sda(adap, val);
210 ndelay(RECOVERY_NDELAY / 2);
202 } 211 }
203 212
204 /* check if recovery actually succeeded */ 213 /* check if recovery actually succeeded */
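
After this change the recovery loop no longer leaves SDA untouched while SCL toggles: when a set_sda callback exists, SDA follows SCL half a cycle later, so each recovery pulse ends in a well-formed STOP and the extra pulses cannot confuse a device that has already released the bus. A sketch of the pulse train, with printf callbacks standing in for the adapter's set_scl/set_sda and the comments marking where the ndelay() halves sit:

#include <stdio.h>

#define RECOVERY_CLK_CNT 9

static void set_scl(int v) { printf("SCL=%d ", v); }
static void set_sda(int v) { printf("SDA=%d\n", v); }

int main(void)
{
	int val = 1;

	for (int i = 0; i < RECOVERY_CLK_CNT * 2; i++) {
		val = !val;
		set_scl(val);
		/* ndelay(RECOVERY_NDELAY / 2) */
		set_sda(val);	/* SDA rising while SCL is high: STOP */
		/* ndelay(RECOVERY_NDELAY / 2) */
	}
	return 0;
}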
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index f3f683041e7f..51970bae3c4a 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -465,15 +465,18 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
465 465
466 status = i2c_transfer(adapter, msg, num); 466 status = i2c_transfer(adapter, msg, num);
467 if (status < 0) 467 if (status < 0)
468 return status; 468 goto cleanup;
469 if (status != num) 469 if (status != num) {
470 return -EIO; 470 status = -EIO;
471 goto cleanup;
472 }
473 status = 0;
471 474
472 /* Check PEC if last message is a read */ 475 /* Check PEC if last message is a read */
473 if (i && (msg[num-1].flags & I2C_M_RD)) { 476 if (i && (msg[num-1].flags & I2C_M_RD)) {
474 status = i2c_smbus_check_pec(partial_pec, &msg[num-1]); 477 status = i2c_smbus_check_pec(partial_pec, &msg[num-1]);
475 if (status < 0) 478 if (status < 0)
476 return status; 479 goto cleanup;
477 } 480 }
478 481
479 if (read_write == I2C_SMBUS_READ) 482 if (read_write == I2C_SMBUS_READ)
@@ -499,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
499 break; 502 break;
500 } 503 }
501 504
505cleanup:
502 if (msg[0].flags & I2C_M_DMA_SAFE) 506 if (msg[0].flags & I2C_M_DMA_SAFE)
503 kfree(msg[0].buf); 507 kfree(msg[0].buf);
504 if (msg[1].flags & I2C_M_DMA_SAFE) 508 if (msg[1].flags & I2C_M_DMA_SAFE)
505 kfree(msg[1].buf); 509 kfree(msg[1].buf);
506 510
507 return 0; 511 return status;
508} 512}
509 513
510/** 514/**
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 7e3d82cff3d5..c149c9c360fc 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
1053 if (src < 0) 1053 if (src < 0)
1054 return IRQ_NONE; 1054 return IRQ_NONE;
1055 1055
1056 if (!(src & data->chip_info->enabled_events)) 1056 if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY)))
1057 return IRQ_NONE; 1057 return IRQ_NONE;
1058 1058
1059 if (src & MMA8452_INT_DRDY) { 1059 if (src & MMA8452_INT_DRDY) {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index f9c0624505a2..42618fe4f83e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -959,6 +959,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
959 } 959 }
960 960
961 irq_type = irqd_get_trigger_type(desc); 961 irq_type = irqd_get_trigger_type(desc);
962 if (!irq_type)
963 irq_type = IRQF_TRIGGER_RISING;
962 if (irq_type == IRQF_TRIGGER_RISING) 964 if (irq_type == IRQF_TRIGGER_RISING)
963 st->irq_mask = INV_MPU6050_ACTIVE_HIGH; 965 st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
964 else if (irq_type == IRQF_TRIGGER_FALLING) 966 else if (irq_type == IRQF_TRIGGER_FALLING)
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index 34d42a2504c9..df5b2a0da96c 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -582,6 +582,8 @@ static int tsl2772_als_calibrate(struct iio_dev *indio_dev)
582 "%s: failed to get lux\n", __func__); 582 "%s: failed to get lux\n", __func__);
583 return lux_val; 583 return lux_val;
584 } 584 }
585 if (lux_val == 0)
586 return -ERANGE;
585 587
586 ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) / 588 ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
587 lux_val; 589 lux_val;
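
The tsl2772 guard is a straightforward divide-by-zero fix: a sensor in the dark can legitimately report 0 lux, and the calibration math divides by that reading, so it now bails out with -ERANGE instead. In miniature:

#include <errno.h>
#include <stdio.h>

static int als_calibrate(int cal_target, int gain_trim, int lux_val)
{
	if (lux_val < 0)
		return lux_val;		/* propagate read errors */
	if (lux_val == 0)
		return -ERANGE;		/* the fix: no divide by zero */
	return cal_target * gain_trim / lux_val;
}

int main(void)
{
	printf("%d\n", als_calibrate(150, 1000, 0));	/* -34 (-ERANGE) */
	return 0;
}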
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 5ec3e41b65f2..fe87d27779d9 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -415,10 +415,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
415 } 415 }
416 comp_humidity = bmp280_compensate_humidity(data, adc_humidity); 416 comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
417 417
418 *val = comp_humidity; 418 *val = comp_humidity * 1000 / 1024;
419 *val2 = 1024;
420 419
421 return IIO_VAL_FRACTIONAL; 420 return IIO_VAL_INT;
422} 421}
423 422
424static int bmp280_read_raw(struct iio_dev *indio_dev, 423static int bmp280_read_raw(struct iio_dev *indio_dev,
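
The bmp280 compensation code produces relative humidity in Q22.10 fixed point, i.e. units of 1/1024 %RH; returning that as a val/1024 fraction mismatched the IIO convention for humidity readings, hence the * 1000 / 1024 conversion to an integer milli-percent value and the switch to IIO_VAL_INT. The arithmetic, with a made-up raw value:

#include <stdio.h>

int main(void)
{
	unsigned int comp_humidity = 47923;	/* example Q22.10 reading */
	unsigned int milli_pct = comp_humidity * 1000 / 1024;

	printf("%u.%03u %%RH\n", milli_pct / 1000, milli_pct % 1000);
	return 0;
}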
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3e90b6a1d9d2..cc06e8404e9b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3488,8 +3488,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3488 struct ib_flow_attr *flow_attr; 3488 struct ib_flow_attr *flow_attr;
3489 struct ib_qp *qp; 3489 struct ib_qp *qp;
3490 struct ib_uflow_resources *uflow_res; 3490 struct ib_uflow_resources *uflow_res;
3491 struct ib_uverbs_flow_spec_hdr *kern_spec;
3491 int err = 0; 3492 int err = 0;
3492 void *kern_spec;
3493 void *ib_spec; 3493 void *ib_spec;
3494 int i; 3494 int i;
3495 3495
@@ -3538,8 +3538,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3538 if (!kern_flow_attr) 3538 if (!kern_flow_attr)
3539 return -ENOMEM; 3539 return -ENOMEM;
3540 3540
3541 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr)); 3541 *kern_flow_attr = cmd.flow_attr;
3542 err = ib_copy_from_udata(kern_flow_attr + 1, ucore, 3542 err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
3543 cmd.flow_attr.size); 3543 cmd.flow_attr.size);
3544 if (err) 3544 if (err)
3545 goto err_free_attr; 3545 goto err_free_attr;
@@ -3559,6 +3559,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3559 goto err_uobj; 3559 goto err_uobj;
3560 } 3560 }
3561 3561
3562 if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
3563 err = -EINVAL;
3564 goto err_put;
3565 }
3566
3562 flow_attr = kzalloc(struct_size(flow_attr, flows, 3567 flow_attr = kzalloc(struct_size(flow_attr, flows,
3563 cmd.flow_attr.num_of_specs), GFP_KERNEL); 3568 cmd.flow_attr.num_of_specs), GFP_KERNEL);
3564 if (!flow_attr) { 3569 if (!flow_attr) {
@@ -3578,21 +3583,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3578 flow_attr->flags = kern_flow_attr->flags; 3583 flow_attr->flags = kern_flow_attr->flags;
3579 flow_attr->size = sizeof(*flow_attr); 3584 flow_attr->size = sizeof(*flow_attr);
3580 3585
3581 kern_spec = kern_flow_attr + 1; 3586 kern_spec = kern_flow_attr->flow_specs;
3582 ib_spec = flow_attr + 1; 3587 ib_spec = flow_attr + 1;
3583 for (i = 0; i < flow_attr->num_of_specs && 3588 for (i = 0; i < flow_attr->num_of_specs &&
3584 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) && 3589 cmd.flow_attr.size >= sizeof(*kern_spec) &&
3585 cmd.flow_attr.size >= 3590 cmd.flow_attr.size >= kern_spec->size;
3586 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) { 3591 i++) {
3587 err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec, 3592 err = kern_spec_to_ib_spec(
3588 uflow_res); 3593 file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec,
3594 ib_spec, uflow_res);
3589 if (err) 3595 if (err)
3590 goto err_free; 3596 goto err_free;
3591 3597
3592 flow_attr->size += 3598 flow_attr->size +=
3593 ((union ib_flow_spec *) ib_spec)->size; 3599 ((union ib_flow_spec *) ib_spec)->size;
3594 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size; 3600 cmd.flow_attr.size -= kern_spec->size;
3595 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size; 3601 kern_spec = ((void *)kern_spec) + kern_spec->size;
3596 ib_spec += ((union ib_flow_spec *) ib_spec)->size; 3602 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3597 } 3603 }
3598 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) { 3604 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
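
Typing kern_spec as struct ib_uverbs_flow_spec_hdr * lets the copy loop validate each step against the header it is about to trust: a full header must fit in the remaining bytes, and so must the size that header claims, before the cursor advances by it. A self-contained sketch of that TLV-style walk; the layout below is illustrative, not the real uverbs header:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct spec_hdr {
	uint32_t type;
	uint16_t size;		/* total size of this spec, header included */
	uint16_t reserved;
};

static int walk_specs(const void *buf, size_t len, int num)
{
	const struct spec_hdr *spec = buf;
	int i;

	/* header must fit before spec->size is read, then size must fit */
	for (i = 0; i < num && len >= sizeof(*spec) && len >= spec->size; i++) {
		printf("spec %d: type %u, size %u\n", i, spec->type, spec->size);
		len -= spec->size;
		spec = (const void *)((const char *)spec + spec->size);
	}
	return (len || i != num) ? -1 : 0;	/* leftovers or short input */
}

int main(void)
{
	unsigned char buf[2 * sizeof(struct spec_hdr)];
	struct spec_hdr h = { .type = 1, .size = sizeof(h) };

	memcpy(buf, &h, sizeof(h));
	h.type = 2;
	memcpy(buf + sizeof(h), &h, sizeof(h));
	return walk_specs(buf, sizeof(buf), 2);
}

The old code compared against offsetof(..., reserved), so a spec whose declared size straddled the end of the user buffer could be read past its bounds.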
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3ae2339dd27a..2094d136513d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -736,10 +736,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	if (ret)
 		return ret;
 
-	if (!file->ucontext &&
-	    (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended))
-		return -EINVAL;
-
 	if (extended) {
 		if (count < (sizeof(hdr) + sizeof(ex_hdr)))
 			return -EINVAL;
@@ -759,6 +755,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 		goto out;
 	}
 
+	/*
+	 * Must be after the ib_dev check, as once the RCU clears ib_dev ==
+	 * NULL means ucontext == NULL
+	 */
+	if (!file->ucontext &&
+	    (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (!verify_command_mask(ib_dev, command, extended)) {
 		ret = -EOPNOTSUPP;
 		goto out;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 0b56828c1319..9d6beb948535 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
 
 /* Completion queues */
 
-struct ib_cq *ib_create_cq(struct ib_device *device,
-			   ib_comp_handler comp_handler,
-			   void (*event_handler)(struct ib_event *, void *),
-			   void *cq_context,
-			   const struct ib_cq_init_attr *cq_attr)
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+			     ib_comp_handler comp_handler,
+			     void (*event_handler)(struct ib_event *, void *),
+			     void *cq_context,
+			     const struct ib_cq_init_attr *cq_attr,
+			     const char *caller)
 {
 	struct ib_cq *cq;
 
@@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
 		cq->cq_context = cq_context;
 		atomic_set(&cq->usecnt, 0);
 		cq->res.type = RDMA_RESTRACK_CQ;
+		cq->res.kern_name = caller;
 		rdma_restrack_add(&cq->res);
 	}
 
 	return cq;
 }
-EXPORT_SYMBOL(ib_create_cq);
+EXPORT_SYMBOL(__ib_create_cq);
 
 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
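The rename only works because in-tree callers keep using ib_create_cq(); upstream pairs this change with a wrapper macro in include/rdma/ib_verbs.h that records the calling module for restrack, roughly along these lines (sketch, not part of this hunk):

/* KBUILD_MODNAME expands to the name of the module being built, so the
 * resource tracker can attribute each kernel-created CQ to its creator. */
#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), \
		       (cq_attr), KBUILD_MODNAME)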
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 1445918e3239..7b76e6f81aeb 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
 {
 	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 
-	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
 		return -ENOMEM;
 
 	mhp->mpl[mhp->mpl_len++] = addr;
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1a1a47ac53c6..f15c93102081 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
 	lockdep_assert_held(&qp->s_lock);
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index b7b671017e59..e254dcec6f64 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	int middle = 0;
 
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 1ab332f1866e..70d39fc450a1 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	u32 lid;
 
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 873e48ea923f..c4ab2d5b4502 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 			   struct rvt_qp *qp)
 	__must_hold(&qp->s_lock)
 {
-	struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+	struct verbs_txreq *tx = NULL;
 
 	write_seqlock(&dev->txwait_lock);
 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 729244c3086c..1c19bbc764b2 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 	if (unlikely(!tx)) {
 		/* call slow path to get the lock */
 		tx = __get_txreq(dev, qp);
-		if (IS_ERR(tx))
+		if (!tx)
 			return tx;
 	}
 	tx->qp = qp;
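The hfi1 hunks above are one logical fix: __get_txreq() used to fail with ERR_PTR(-EBUSY) while the fast path fails with NULL, and callers tested only one of the two conventions. An illustrative sketch of the hazard (fast_alloc() and slow_alloc() are hypothetical stand-ins):

static struct verbs_txreq *get_txreq_sketch(struct hfi1_ibdev *dev,
					    struct rvt_qp *qp)
{
	struct verbs_txreq *tx = fast_alloc(dev);	/* NULL on failure */

	if (!tx)
		tx = slow_alloc(dev, qp);   /* was ERR_PTR(-EBUSY) on failure */

	/*
	 * A caller that only checks IS_ERR(tx) lets a NULL from the fast
	 * path straight through and dereferences it. After the patch both
	 * paths return NULL, so a single !tx test covers every failure.
	 */
	return tx;
}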
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index ed1f253faf97..c7c85c22e4e3 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -486,8 +486,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 	}
 
 	if (flags & IB_MR_REREG_ACCESS) {
-		if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
-			return -EPERM;
+		if (ib_access_writable(mr_access_flags) &&
+		    !mmr->umem->writable) {
+			err = -EPERM;
+			goto release_mpt_entry;
+		}
 
 		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
 					       convert_access(mr_access_flags));
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e52dd21519b4..b3ba9a222550 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3199,8 +3199,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters,
 	if (!mcounters->hw_cntrs_hndl) {
 		mcounters->hw_cntrs_hndl = mlx5_fc_create(
 			to_mdev(ibcounters->device)->mdev, false);
-		if (!mcounters->hw_cntrs_hndl) {
-			ret = -ENOMEM;
+		if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+			ret = PTR_ERR(mcounters->hw_cntrs_hndl);
 			goto free;
 		}
 		hw_hndl = true;
@@ -3546,29 +3546,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 			return ERR_PTR(-ENOMEM);
 
 		err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
-		if (err) {
-			kfree(ucmd);
-			return ERR_PTR(err);
-		}
+		if (err)
+			goto free_ucmd;
 	}
 
-	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
-		return ERR_PTR(-ENOMEM);
+	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+		err = -ENOMEM;
+		goto free_ucmd;
+	}
 
 	if (domain != IB_FLOW_DOMAIN_USER ||
 	    flow_attr->port > dev->num_ports ||
 	    (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
-				  IB_FLOW_ATTR_FLAGS_EGRESS)))
-		return ERR_PTR(-EINVAL);
+				  IB_FLOW_ATTR_FLAGS_EGRESS))) {
+		err = -EINVAL;
+		goto free_ucmd;
+	}
 
 	if (is_egress &&
 	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
-	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
-		return ERR_PTR(-EINVAL);
+	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+		err = -EINVAL;
+		goto free_ucmd;
+	}
 
 	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
-	if (!dst)
-		return ERR_PTR(-ENOMEM);
+	if (!dst) {
+		err = -ENOMEM;
+		goto free_ucmd;
+	}
 
 	mutex_lock(&dev->flow_db->lock);
 
@@ -3637,8 +3643,8 @@ destroy_ft:
 unlock:
 	mutex_unlock(&dev->flow_db->lock);
 	kfree(dst);
+free_ucmd:
 	kfree(ucmd);
-	kfree(handler);
 	return ERR_PTR(err);
 }
 
@@ -6107,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
 			     MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-	if (MLX5_VPORT_MANAGER(mdev) &&
+	if (MLX5_ESWITCH_MANAGER(mdev) &&
 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
 		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
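The mlx5_ib_create_flow() conversion is the standard single-exit unwind: once ucmd is allocated, every early return must be funneled through a label that frees it (the early ERR_PTR returns above leaked ucmd, and the stray kfree(handler) also goes away in the cleanup). Reduced to a sketch with illustrative names:

struct thing { int n; };	/* illustrative */

static struct thing *make_thing(int n)
{
	struct thing *t;
	int err;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	if (n < 0) {
		err = -EINVAL;
		goto free_t;	/* a bare return here would leak t */
	}

	t->n = n;
	return t;

free_t:
	kfree(t);
	return ERR_PTR(err);
}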
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 0af7b7905550..f5de5adc9b1a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
-	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
-		return ERR_PTR(-EINVAL);
+	if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	desc_size = roundup_pow_of_two(desc_size);
 	desc_size = max_t(size_t, 32, desc_size);
-	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
-		return ERR_PTR(-EINVAL);
+	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	srq->msrq.wqe_shift = ilog2(desc_size);
 	buf_size = srq->msrq.max * desc_size;
-	if (buf_size < desc_size)
-		return ERR_PTR(-EINVAL);
+	if (buf_size < desc_size) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	in.type = init_attr->srq_type;
 
 	if (pd->uobject)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index f7ac8fc9b531..f07b8df96f43 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1957,6 +1957,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
 	if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+		if (rdma_protocol_iwarp(&dev->ibdev, 1))
+			return -EINVAL;
+
 		if (attr_mask & IB_QP_PATH_MTU) {
 			if (attr->path_mtu < IB_MTU_256 ||
 			    attr->path_mtu > IB_MTU_4096) {
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index f30eeba3f772..8be27238a86e 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -645,6 +645,9 @@ next_wqe:
 		} else {
 			goto exit;
 		}
+		if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+		    qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+			rxe_run_task(&qp->comp.task, 1);
 		qp->req.wqe_index = next_index(qp->sq.queue,
 					       qp->req.wqe_index);
 		goto next_wqe;
@@ -709,6 +712,7 @@ next_wqe:
 
 	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
 		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
+		kfree_skb(skb);
 		goto err;
 	}
 
@@ -740,7 +744,6 @@ next_wqe:
 		goto next_wqe;
 
 err:
-	kfree_skb(skb);
 	wqe->status = IB_WC_LOC_PROT_ERR;
 	wqe->state = wqe_state_error;
 	__rxe_do_task(&qp->comp.task);
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index cf30523c6ef6..6c7326c93721 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -131,8 +131,10 @@ EXPORT_SYMBOL(input_mt_destroy_slots);
  * inactive, or if the tool type is changed, a new tracking id is
  * assigned to the slot. The tool type is only reported if the
  * corresponding absbit field is set.
+ *
+ * Returns true if contact is active.
  */
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
 				unsigned int tool_type, bool active)
 {
 	struct input_mt *mt = dev->mt;
@@ -140,22 +142,24 @@ void input_mt_report_slot_state(struct input_dev *dev,
 	int id;
 
 	if (!mt)
-		return;
+		return false;
 
 	slot = &mt->slots[mt->slot];
 	slot->frame = mt->frame;
 
 	if (!active) {
 		input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
-		return;
+		return false;
 	}
 
 	id = input_mt_get_value(slot, ABS_MT_TRACKING_ID);
-	if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type)
+	if (id < 0)
 		id = input_mt_new_trkid(mt);
 
 	input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
 	input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type);
+
+	return true;
 }
 EXPORT_SYMBOL(input_mt_report_slot_state);
 
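With the bool return, drivers no longer need to duplicate the "is this contact active" logic before reporting coordinates; a sketch of the intended calling pattern (the surrounding driver is illustrative):

static void report_contact(struct input_dev *dev, int slot, bool active,
			   int x, int y)
{
	input_mt_slot(dev, slot);
	if (input_mt_report_slot_state(dev, MT_TOOL_FINGER, active)) {
		/* position is only meaningful while the contact is active */
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
}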
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 48e36acbeb49..cd620e009bad 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -125,7 +125,7 @@ static const struct xpad_device {
 	u8 mapping;
 	u8 xtype;
 } xpad_device[] = {
-	{ 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+	{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
 	{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
 	{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index f6e643b589b6..e8dae6195b30 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -45,7 +45,7 @@ struct event_dev {
 static irqreturn_t events_interrupt(int irq, void *dev_id)
 {
 	struct event_dev *edev = dev_id;
-	unsigned type, code, value;
+	unsigned int type, code, value;
 
 	type = __raw_readl(edev->addr + REG_READ);
 	code = __raw_readl(edev->addr + REG_READ);
@@ -57,7 +57,7 @@ static irqreturn_t events_interrupt(int irq, void *dev_id)
 }
 
 static void events_import_bits(struct event_dev *edev,
-			unsigned long bits[], unsigned type, size_t count)
+			unsigned long bits[], unsigned int type, size_t count)
 {
 	void __iomem *addr = edev->addr;
 	int i, j;
@@ -99,6 +99,7 @@ static void events_import_abs_params(struct event_dev *edev)
 
 	for (j = 0; j < ARRAY_SIZE(val); j++) {
 		int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+
 		val[j] = __raw_readl(edev->addr + REG_DATA + offset);
 	}
 
@@ -112,7 +113,7 @@ static int events_probe(struct platform_device *pdev)
 	struct input_dev *input_dev;
 	struct event_dev *edev;
 	struct resource *res;
-	unsigned keymapnamelen;
+	unsigned int keymapnamelen;
 	void __iomem *addr;
 	int irq;
 	int i;
@@ -150,7 +151,7 @@ static int events_probe(struct platform_device *pdev)
 	for (i = 0; i < keymapnamelen; i++)
 		edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
 
-	pr_debug("events_probe() keymap=%s\n", edev->name);
+	pr_debug("%s: keymap=%s\n", __func__, edev->name);
 
 	input_dev->name = edev->name;
 	input_dev->id.bustype = BUS_HOST;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index c25606e00693..ca59a2be9bc5 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -841,4 +841,14 @@ config INPUT_RAVE_SP_PWRBUTTON
 	  To compile this driver as a module, choose M here: the
 	  module will be called rave-sp-pwrbutton.
 
+config INPUT_SC27XX_VIBRA
+	tristate "Spreadtrum sc27xx vibrator support"
+	depends on MFD_SC27XX_PMIC || COMPILE_TEST
+	select INPUT_FF_MEMLESS
+	help
+	  This option enables support for Spreadtrum sc27xx vibrator driver.
+
+	  To compile this driver as a module, choose M here. The module will
+	  be called sc27xx_vibra.
+
 endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 72cde28649e2..9d0f9d1ff68f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
 obj-$(CONFIG_INPUT_AXP20X_PEK)		+= axp20x-pek.o
 obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER)	+= rotary_encoder.o
 obj-$(CONFIG_INPUT_RK805_PWRKEY)	+= rk805-pwrkey.o
+obj-$(CONFIG_INPUT_SC27XX_VIBRA)	+= sc27xx-vibra.o
 obj-$(CONFIG_INPUT_SGI_BTNS)		+= sgi_btns.o
 obj-$(CONFIG_INPUT_SIRFSOC_ONKEY)	+= sirfsoc-onkey.o
 obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY)	+= soc_button_array.o
diff --git a/drivers/input/misc/sc27xx-vibra.c b/drivers/input/misc/sc27xx-vibra.c
new file mode 100644
index 000000000000..295251abbdac
--- /dev/null
+++ b/drivers/input/misc/sc27xx-vibra.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+
+#define CUR_DRV_CAL_SEL		GENMASK(13, 12)
+#define SLP_LDOVIBR_PD_EN	BIT(9)
+#define LDO_VIBR_PD		BIT(8)
+
+struct vibra_info {
+	struct input_dev	*input_dev;
+	struct work_struct	play_work;
+	struct regmap		*regmap;
+	u32			base;
+	u32			strength;
+	bool			enabled;
+};
+
+static void sc27xx_vibra_set(struct vibra_info *info, bool on)
+{
+	if (on) {
+		regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, 0);
+		regmap_update_bits(info->regmap, info->base,
+				   SLP_LDOVIBR_PD_EN, 0);
+		info->enabled = true;
+	} else {
+		regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD,
+				   LDO_VIBR_PD);
+		regmap_update_bits(info->regmap, info->base,
+				   SLP_LDOVIBR_PD_EN, SLP_LDOVIBR_PD_EN);
+		info->enabled = false;
+	}
+}
+
+static int sc27xx_vibra_hw_init(struct vibra_info *info)
+{
+	return regmap_update_bits(info->regmap, info->base, CUR_DRV_CAL_SEL, 0);
+}
+
+static void sc27xx_vibra_play_work(struct work_struct *work)
+{
+	struct vibra_info *info = container_of(work, struct vibra_info,
+					       play_work);
+
+	if (info->strength && !info->enabled)
+		sc27xx_vibra_set(info, true);
+	else if (info->strength == 0 && info->enabled)
+		sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_play(struct input_dev *input, void *data,
+			     struct ff_effect *effect)
+{
+	struct vibra_info *info = input_get_drvdata(input);
+
+	info->strength = effect->u.rumble.weak_magnitude;
+	schedule_work(&info->play_work);
+
+	return 0;
+}
+
+static void sc27xx_vibra_close(struct input_dev *input)
+{
+	struct vibra_info *info = input_get_drvdata(input);
+
+	cancel_work_sync(&info->play_work);
+	if (info->enabled)
+		sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_probe(struct platform_device *pdev)
+{
+	struct vibra_info *info;
+	int error;
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!info->regmap) {
+		dev_err(&pdev->dev, "failed to get vibrator regmap.\n");
+		return -ENODEV;
+	}
+
+	error = device_property_read_u32(&pdev->dev, "reg", &info->base);
+	if (error) {
+		dev_err(&pdev->dev, "failed to get vibrator base address.\n");
+		return error;
+	}
+
+	info->input_dev = devm_input_allocate_device(&pdev->dev);
+	if (!info->input_dev) {
+		dev_err(&pdev->dev, "failed to allocate input device.\n");
+		return -ENOMEM;
+	}
+
+	info->input_dev->name = "sc27xx:vibrator";
+	info->input_dev->id.version = 0;
+	info->input_dev->close = sc27xx_vibra_close;
+
+	input_set_drvdata(info->input_dev, info);
+	input_set_capability(info->input_dev, EV_FF, FF_RUMBLE);
+	INIT_WORK(&info->play_work, sc27xx_vibra_play_work);
+	info->enabled = false;
+
+	error = sc27xx_vibra_hw_init(info);
+	if (error) {
+		dev_err(&pdev->dev, "failed to initialize the vibrator.\n");
+		return error;
+	}
+
+	error = input_ff_create_memless(info->input_dev, NULL,
+					sc27xx_vibra_play);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register vibrator to FF.\n");
+		return error;
+	}
+
+	error = input_register_device(info->input_dev);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register input device.\n");
+		return error;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id sc27xx_vibra_of_match[] = {
+	{ .compatible = "sprd,sc2731-vibrator", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sc27xx_vibra_of_match);
+
+static struct platform_driver sc27xx_vibra_driver = {
+	.driver = {
+		.name = "sc27xx-vibrator",
+		.of_match_table = sc27xx_vibra_of_match,
+	},
+	.probe = sc27xx_vibra_probe,
+};
+
+module_platform_driver(sc27xx_vibra_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC27xx Vibrator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiaotong Lu <xiaotong.lu@spreadtrum.com>");
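For context, the rumble magnitude consumed by sc27xx_vibra_play() arrives through the standard evdev force-feedback interface; a user-space sketch of driving it (standard API, the device path is an assumption):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

static int rumble(const char *evdev)	/* e.g. "/dev/input/event3" */
{
	struct ff_effect e;
	struct input_event play;
	int fd = open(evdev, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&e, 0, sizeof(e));
	e.type = FF_RUMBLE;
	e.id = -1;				/* kernel assigns a slot */
	e.u.rumble.weak_magnitude = 0x8000;	/* becomes info->strength */
	if (ioctl(fd, EVIOCSFF, &e) < 0)
		return -1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = e.id;
	play.value = 1;				/* start playback */
	return write(fd, &play, sizeof(play)) == sizeof(play) ? 0 : -1;
}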
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 599544c1a91c..243e0fa6e3e3 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -27,6 +27,8 @@
 #define ETP_DISABLE_POWER	0x0001
 #define ETP_PRESSURE_OFFSET	25
 
+#define ETP_CALIBRATE_MAX_LEN	3
+
 /* IAP Firmware handling */
 #define ETP_PRODUCT_ID_FORMAT_STRING	"%d.0"
 #define ETP_FW_NAME	"elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 8ff75114e762..1f9cd7d8b7ad 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -613,7 +613,7 @@ static ssize_t calibrate_store(struct device *dev,
 	int tries = 20;
 	int retval;
 	int error;
-	u8 val[3];
+	u8 val[ETP_CALIBRATE_MAX_LEN];
 
 	retval = mutex_lock_interruptible(&data->sysfs_mutex);
 	if (retval)
@@ -1345,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN060C", 0 },
 	{ "ELAN0611", 0 },
 	{ "ELAN0612", 0 },
+	{ "ELAN0618", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index cfcb32559925..c060d270bc4d 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -56,7 +56,7 @@
 static int elan_smbus_initialize(struct i2c_client *client)
 {
 	u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
-	u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+	u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
 	int len, error;
 
 	/* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
 static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
 {
 	int error;
+	u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
+
+	BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
 
 	error = i2c_smbus_read_block_data(client,
-					  ETP_SMBUS_CALIBRATE_QUERY, val);
+					  ETP_SMBUS_CALIBRATE_QUERY, buf);
 	if (error < 0)
 		return error;
 
+	memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
 	return 0;
 }
 
@@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
 {
 	int len;
 
+	BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
+
 	len = i2c_smbus_read_block_data(client,
 					ETP_SMBUS_PACKET_QUERY,
 					&report[ETP_SMBUS_REPORT_OFFSET]);
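The underlying contract here: i2c_smbus_read_block_data() copies up to I2C_SMBUS_BLOCK_MAX (32) bytes into the caller's buffer, sized by the length byte the device returns, so the caller cannot cap the transfer. Reading into a 3-byte array was a potential stack overflow; the safe shape is a full-size bounce buffer (sketch, with an illustrative command byte):

	u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
	int len = i2c_smbus_read_block_data(client, cmd, buf);

	if (len < 0)
		return len;
	memcpy(val, buf, min_t(int, len, ETP_CALIBRATE_MAX_LEN));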
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index fb4d902c4403..dd85b16dc6f8 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -799,7 +799,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
 	else if (ic_version == 7 && etd->info.samples[1] == 0x2A)
 		sanity_check = ((packet[3] & 0x1c) == 0x10);
 	else
-		sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+		sanity_check = ((packet[0] & 0x08) == 0x00 &&
 				(packet[3] & 0x1c) == 0x10);
 
 	if (!sanity_check)
@@ -1175,6 +1175,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
 	{ }
 };
 
+static const char * const middle_button_pnp_ids[] = {
+	"LEN2131", /* ThinkPad P52 w/ NFC */
+	"LEN2132", /* ThinkPad P52 */
+	NULL
+};
+
 /*
  * Set the appropriate event bits for the input subsystem
  */
@@ -1194,7 +1200,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
 	__clear_bit(EV_REL, dev->evbit);
 
 	__set_bit(BTN_LEFT, dev->keybit);
-	if (dmi_check_system(elantech_dmi_has_middle_button))
+	if (dmi_check_system(elantech_dmi_has_middle_button) ||
+	    psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
 		__set_bit(BTN_MIDDLE, dev->keybit);
 	__set_bit(BTN_RIGHT, dev->keybit);
 
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 5ff5b1952be0..d3ff1fc09af7 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
 		else
 			input_report_rel(dev, REL_WHEEL, -wheel);
 
-		input_report_key(dev, BTN_SIDE, BIT(4));
-		input_report_key(dev, BTN_EXTRA, BIT(5));
+		input_report_key(dev, BTN_SIDE, packet[3] & BIT(4));
+		input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5));
 		break;
 	}
 	break;
@@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
 		input_report_rel(dev, REL_WHEEL, -(s8) packet[3]);
 
 		/* Extra buttons on Genius NewNet 3D */
-		input_report_key(dev, BTN_SIDE, BIT(6));
-		input_report_key(dev, BTN_EXTRA, BIT(7));
+		input_report_key(dev, BTN_SIDE, packet[0] & BIT(6));
+		input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7));
 		break;
 
 	case PSMOUSE_THINKPS:
 		/* Extra button on ThinkingMouse */
-		input_report_key(dev, BTN_EXTRA, BIT(3));
+		input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3));
 
 		/*
 		 * Without this bit of weirdness moving up gives wildly
@@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
 		 * Cortron PS2 Trackball reports SIDE button in the
 		 * 4th bit of the first byte.
 		 */
-		input_report_key(dev, BTN_SIDE, BIT(3));
+		input_report_key(dev, BTN_SIDE, packet[0] & BIT(3));
 		packet[0] |= BIT(3);
 		break;
 
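The psmouse fix above is worth spelling out: BIT(4) is the constant 0x10, which as a key value is always true, so these buttons were reported as permanently held. The state has to be sampled from the packet:

	/* before: always nonzero, button stuck down */
	input_report_key(dev, BTN_SIDE, BIT(4));
	/* after: zero or nonzero depending on the actual packet bit */
	input_report_key(dev, BTN_SIDE, packet[3] & BIT(4));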
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 7172b88cd064..fad2eae4a118 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -3,6 +3,7 @@
 #
 config RMI4_CORE
 	tristate "Synaptics RMI4 bus support"
+	select IRQ_DOMAIN
 	help
 	  Say Y here if you want to support the Synaptics RMI4 bus.  This is
 	  required for all RMI4 device support.
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
index 8bb866c7b985..8eeffa066022 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.c
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
 	if (obj->type == RMI_2D_OBJECT_NONE)
 		return;
 
-	if (axis_align->swap_axes)
-		swap(obj->x, obj->y);
-
 	if (axis_align->flip_x)
 		obj->x = sensor->max_x - obj->x;
 
 	if (axis_align->flip_y)
 		obj->y = sensor->max_y - obj->y;
 
+	if (axis_align->swap_axes)
+		swap(obj->x, obj->y);
+
 	/*
 	 * Here checking if X offset or y offset are specified is
 	 * redundant. We just add the offsets or clip the values.
@@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
 	x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
 	y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
 
-	if (axis_align->swap_axes)
-		swap(x, y);
-
 	if (axis_align->flip_x)
 		x = min(RMI_2D_REL_POS_MAX, -x);
 
 	if (axis_align->flip_y)
 		y = min(RMI_2D_REL_POS_MAX, -y);
 
+	if (axis_align->swap_axes)
+		swap(x, y);
+
 	if (x || y) {
 		input_report_rel(sensor->input, REL_X, x);
 		input_report_rel(sensor->input, REL_Y, y);
@@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
 	struct input_dev *input = sensor->input;
 	int res_x;
 	int res_y;
+	int max_x, max_y;
 	int input_flags = 0;
 
 	if (sensor->report_abs) {
-		if (sensor->axis_align.swap_axes) {
-			swap(sensor->max_x, sensor->max_y);
-			swap(sensor->axis_align.clip_x_low,
-			     sensor->axis_align.clip_y_low);
-			swap(sensor->axis_align.clip_x_high,
-			     sensor->axis_align.clip_y_high);
-		}
-
 		sensor->min_x = sensor->axis_align.clip_x_low;
 		if (sensor->axis_align.clip_x_high)
 			sensor->max_x = min(sensor->max_x,
@@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
 					    sensor->axis_align.clip_y_high);
 
 		set_bit(EV_ABS, input->evbit);
-		input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
-			0, 0);
-		input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
-			0, 0);
+
+		max_x = sensor->max_x;
+		max_y = sensor->max_y;
+		if (sensor->axis_align.swap_axes)
+			swap(max_x, max_y);
+		input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
 
 		if (sensor->x_mm && sensor->y_mm) {
 			res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
 			res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
+			if (sensor->axis_align.swap_axes)
+				swap(res_x, res_y);
 
 			input_abs_set_res(input, ABS_X, res_x);
 			input_abs_set_res(input, ABS_Y, res_y);
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index c5fa53adba8d..bd0d5ff01b08 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -9,6 +9,8 @@
 
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/pm.h>
 #include <linux/rmi.h>
@@ -167,6 +169,39 @@ static inline void rmi_function_of_probe(struct rmi_function *fn)
 {}
 #endif
 
+static struct irq_chip rmi_irq_chip = {
+	.name = "rmi4",
+};
+
+static int rmi_create_function_irq(struct rmi_function *fn,
+				   struct rmi_function_handler *handler)
+{
+	struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+	int i, error;
+
+	for (i = 0; i < fn->num_of_irqs; i++) {
+		set_bit(fn->irq_pos + i, fn->irq_mask);
+
+		fn->irq[i] = irq_create_mapping(drvdata->irqdomain,
+						fn->irq_pos + i);
+
+		irq_set_chip_data(fn->irq[i], fn);
+		irq_set_chip_and_handler(fn->irq[i], &rmi_irq_chip,
+					 handle_simple_irq);
+		irq_set_nested_thread(fn->irq[i], 1);
+
+		error = devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL,
+					handler->attention, IRQF_ONESHOT,
+					dev_name(&fn->dev), fn);
+		if (error) {
+			dev_err(&fn->dev, "Error %d registering IRQ\n", error);
+			return error;
+		}
+	}
+
+	return 0;
+}
+
 static int rmi_function_probe(struct device *dev)
 {
 	struct rmi_function *fn = to_rmi_function(dev);
@@ -178,7 +213,14 @@ static int rmi_function_probe(struct device *dev)
 
 	if (handler->probe) {
 		error = handler->probe(fn);
-		return error;
+		if (error)
+			return error;
+	}
+
+	if (fn->num_of_irqs && handler->attention) {
+		error = rmi_create_function_irq(fn, handler);
+		if (error)
+			return error;
 	}
 
 	return 0;
@@ -230,12 +272,18 @@ err_put_device:
 
 void rmi_unregister_function(struct rmi_function *fn)
 {
+	int i;
+
 	rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
 		fn->fd.function_number);
 
 	device_del(&fn->dev);
 	of_node_put(fn->dev.of_node);
 	put_device(&fn->dev);
+
+	for (i = 0; i < fn->num_of_irqs; i++)
+		irq_dispose_mapping(fn->irq[i]);
+
 }
 
 /**
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
index b7625a9ac66a..96383eab41ba 100644
--- a/drivers/input/rmi4/rmi_bus.h
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -14,6 +14,12 @@
 
 struct rmi_device;
 
+/*
+ * The interrupt source count in the function descriptor can represent up to
+ * 6 interrupt sources in the normal manner.
+ */
+#define RMI_FN_MAX_IRQS	6
+
 /**
  * struct rmi_function - represents the implementation of an RMI4
  * function for a particular device (basically, a driver for that RMI4 function)
@@ -26,6 +32,7 @@ struct rmi_device;
  * @irq_pos: The position in the irq bitfield this function holds
  * @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN
  * interrupt handling.
+ * @irqs: assigned virq numbers (up to num_of_irqs)
  *
  * @node: entry in device's list of functions
  */
@@ -36,6 +43,7 @@ struct rmi_device;
 	struct list_head node;
 
 	unsigned int num_of_irqs;
+	int irq[RMI_FN_MAX_IRQS];
 	unsigned int irq_pos;
 	unsigned long irq_mask[];
 };
@@ -76,7 +84,7 @@ struct rmi_function_handler {
 	void (*remove)(struct rmi_function *fn);
 	int (*config)(struct rmi_function *fn);
 	int (*reset)(struct rmi_function *fn);
-	int (*attention)(struct rmi_function *fn, unsigned long *irq_bits);
+	irqreturn_t (*attention)(int irq, void *ctx);
 	int (*suspend)(struct rmi_function *fn);
 	int (*resume)(struct rmi_function *fn);
 };
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 7d29053dfb0f..fc3ab93b7aea 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -21,6 +21,7 @@
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/irqdomain.h>
 #include <uapi/linux/input.h>
 #include <linux/rmi.h>
 #include "rmi_bus.h"
@@ -127,28 +128,11 @@ static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
 	return 0;
 }
 
-static void process_one_interrupt(struct rmi_driver_data *data,
-				  struct rmi_function *fn)
-{
-	struct rmi_function_handler *fh;
-
-	if (!fn || !fn->dev.driver)
-		return;
-
-	fh = to_rmi_function_handler(fn->dev.driver);
-	if (fh->attention) {
-		bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
-			   data->irq_count);
-		if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
-			fh->attention(fn, data->fn_irq_bits);
-	}
-}
-
 static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 {
 	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
 	struct device *dev = &rmi_dev->dev;
-	struct rmi_function *entry;
+	int i;
 	int error;
 
 	if (!data)
@@ -173,16 +157,8 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 	 */
 	mutex_unlock(&data->irq_mutex);
 
-	/*
-	 * It would be nice to be able to use irq_chip to handle these
-	 * nested IRQs. Unfortunately, most of the current customers for
-	 * this driver are using older kernels (3.0.x) that don't support
-	 * the features required for that. Once they've shifted to more
-	 * recent kernels (say, 3.3 and higher), this should be switched to
-	 * use irq_chip.
-	 */
-	list_for_each_entry(entry, &data->function_list, node)
-		process_one_interrupt(data, entry);
+	for_each_set_bit(i, data->irq_status, data->irq_count)
+		handle_nested_irq(irq_find_mapping(data->irqdomain, i));
 
 	if (data->input)
 		input_sync(data->input);
@@ -1001,9 +977,13 @@ EXPORT_SYMBOL_GPL(rmi_driver_resume);
 static int rmi_driver_remove(struct device *dev)
 {
 	struct rmi_device *rmi_dev = to_rmi_device(dev);
+	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
 
 	rmi_disable_irq(rmi_dev, false);
 
+	irq_domain_remove(data->irqdomain);
+	data->irqdomain = NULL;
+
 	rmi_f34_remove_sysfs(rmi_dev);
 	rmi_free_function_list(rmi_dev);
 
@@ -1035,7 +1015,8 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
 {
 	struct rmi_device *rmi_dev = data->rmi_dev;
 	struct device *dev = &rmi_dev->dev;
-	int irq_count;
+	struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
+	int irq_count = 0;
 	size_t size;
 	int retval;
 
@@ -1046,7 +1027,6 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
 	 * being accessed.
 	 */
 	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
-	irq_count = 0;
 	data->bootloader_mode = false;
 
 	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
@@ -1058,6 +1038,15 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
 	if (data->bootloader_mode)
 		dev_warn(dev, "Device in bootloader mode.\n");
 
+	/* Allocate and register a linear revmap irq_domain */
+	data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
+						   &irq_domain_simple_ops,
+						   data);
+	if (!data->irqdomain) {
+		dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
+		return -ENOMEM;
+	}
+
 	data->irq_count = irq_count;
 	data->num_of_irq_regs = (data->irq_count + 7) / 8;
 
@@ -1080,10 +1069,9 @@ int rmi_init_functions(struct rmi_driver_data *data)
 {
 	struct rmi_device *rmi_dev = data->rmi_dev;
 	struct device *dev = &rmi_dev->dev;
-	int irq_count;
+	int irq_count = 0;
 	int retval;
 
-	irq_count = 0;
 	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
 	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
 	if (retval < 0) {
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index 8a07ae147df6..4edaa14fe878 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -681,9 +681,9 @@ static int rmi_f01_resume(struct rmi_function *fn)
 	return 0;
 }
 
-static int rmi_f01_attention(struct rmi_function *fn,
-			     unsigned long *irq_bits)
+static irqreturn_t rmi_f01_attention(int irq, void *ctx)
 {
+	struct rmi_function *fn = ctx;
 	struct rmi_device *rmi_dev = fn->rmi_dev;
 	int error;
 	u8 device_status;
@@ -692,7 +692,7 @@ static int rmi_f01_attention(struct rmi_function *fn,
 	if (error) {
 		dev_err(&fn->dev,
 			"Failed to read device status: %d.\n", error);
-		return error;
+		return IRQ_RETVAL(error);
 	}
 
 	if (RMI_F01_STATUS_BOOTLOADER(device_status))
@@ -704,11 +704,11 @@ static int rmi_f01_attention(struct rmi_function *fn,
 		error = rmi_dev->driver->reset_handler(rmi_dev);
 		if (error) {
 			dev_err(&fn->dev, "Device reset failed: %d\n", error);
-			return error;
+			return IRQ_RETVAL(error);
 		}
 	}
 
-	return 0;
+	return IRQ_HANDLED;
 }
 
 struct rmi_function_handler rmi_f01_handler = {
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index 88822196d6b7..aaa1edc95522 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -244,8 +244,9 @@ static int rmi_f03_config(struct rmi_function *fn)
 	return 0;
 }
 
-static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f03_attention(int irq, void *ctx)
 {
+	struct rmi_function *fn = ctx;
 	struct rmi_device *rmi_dev = fn->rmi_dev;
 	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
 	struct f03_data *f03 = dev_get_drvdata(&fn->dev);
@@ -262,7 +263,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
 	/* First grab the data passed by the transport device */
 	if (drvdata->attn_data.size < ob_len) {
 		dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
-		return 0;
+		return IRQ_HANDLED;
 	}
 
 	memcpy(obs, drvdata->attn_data.data, ob_len);
@@ -277,7 +278,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
 				"%s: Failed to read F03 output buffers: %d\n",
 				__func__, error);
 			serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
-			return error;
+			return IRQ_RETVAL(error);
 		}
 	}
 
@@ -303,7 +304,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
 		serio_interrupt(f03->serio, ob_data, serio_flags);
 	}
 
-	return 0;
+	return IRQ_HANDLED;
 }
 
 static void rmi_f03_remove(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index 12a233251793..df64d6aed4f7 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -570,9 +570,7 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
 }
 
 static void rmi_f11_finger_handler(struct f11_data *f11,
-				   struct rmi_2d_sensor *sensor,
-				   unsigned long *irq_bits, int num_irq_regs,
-				   int size)
+				   struct rmi_2d_sensor *sensor, int size)
 {
 	const u8 *f_state = f11->data.f_state;
 	u8 finger_state;
@@ -581,12 +579,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
 	int rel_fingers;
 	int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
 
-	int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
-				  num_irq_regs * 8);
-	int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
-				  num_irq_regs * 8);
-
-	if (abs_bits) {
+	if (sensor->report_abs) {
 		if (abs_size > size)
 			abs_fingers = size / RMI_F11_ABS_BYTES;
 		else
@@ -604,19 +597,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
 			rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
 						finger_state, i);
 		}
-	}
 
-	if (rel_bits) {
-		if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
-			rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
-		else
-			rel_fingers = sensor->nbr_fingers;
-
-		for (i = 0; i < rel_fingers; i++)
-			rmi_f11_rel_pos_report(f11, i);
-	}
-
-	if (abs_bits) {
 		/*
 		 * the absolute part is made in 2 parts to allow the kernel
 		 * tracking to take place.
@@ -638,7 +619,16 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
 		}
 
 		input_mt_sync_frame(sensor->input);
+	} else if (sensor->report_rel) {
+		if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+			rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+		else
+			rel_fingers = sensor->nbr_fingers;
+
+		for (i = 0; i < rel_fingers; i++)
+			rmi_f11_rel_pos_report(f11, i);
 	}
+
 }
 
 static int f11_2d_construct_data(struct f11_data *f11)
@@ -1276,8 +1266,9 @@ static int rmi_f11_config(struct rmi_function *fn)
 	return 0;
 }
 
-static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f11_attention(int irq, void *ctx)
 {
+	struct rmi_function *fn = ctx;
 	struct rmi_device *rmi_dev = fn->rmi_dev;
 	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
 	struct f11_data *f11 = dev_get_drvdata(&fn->dev);
@@ -1303,13 +1294,12 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
 				data_base_addr, f11->sensor.data_pkt,
 				f11->sensor.pkt_size);
 		if (error < 0)
-			return error;
+			return IRQ_RETVAL(error);
 	}
 
-	rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
-			       drvdata->num_of_irq_regs, valid_bytes);
+	rmi_f11_finger_handler(f11, &f11->sensor, valid_bytes);
 
-	return 0;
+	return IRQ_HANDLED;
 }
 
 static int rmi_f11_resume(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index a3d1aa88f2a9..5c7f48915779 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -197,10 +197,10 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
197 rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i); 197 rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
198} 198}
199 199
200static int rmi_f12_attention(struct rmi_function *fn, 200static irqreturn_t rmi_f12_attention(int irq, void *ctx)
201 unsigned long *irq_nr_regs)
202{ 201{
203 int retval; 202 int retval;
203 struct rmi_function *fn = ctx;
204 struct rmi_device *rmi_dev = fn->rmi_dev; 204 struct rmi_device *rmi_dev = fn->rmi_dev;
205 struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); 205 struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
206 struct f12_data *f12 = dev_get_drvdata(&fn->dev); 206 struct f12_data *f12 = dev_get_drvdata(&fn->dev);
@@ -222,7 +222,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
222 if (retval < 0) { 222 if (retval < 0) {
223 dev_err(&fn->dev, "Failed to read object data. Code: %d.\n", 223 dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
224 retval); 224 retval);
225 return retval; 225 return IRQ_RETVAL(retval);
226 } 226 }
227 } 227 }
228 228
@@ -232,7 +232,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
232 232
233 input_mt_sync_frame(sensor->input); 233 input_mt_sync_frame(sensor->input);
234 234
235 return 0; 235 return IRQ_HANDLED;
236} 236}
237 237
238static int rmi_f12_write_control_regs(struct rmi_function *fn) 238static int rmi_f12_write_control_regs(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 82e0f0d43d55..5e3ed5ac0c3e 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -122,8 +122,9 @@ static void rmi_f30_report_button(struct rmi_function *fn,
122 } 122 }
123} 123}
124 124
125static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits) 125static irqreturn_t rmi_f30_attention(int irq, void *ctx)
126{ 126{
127 struct rmi_function *fn = ctx;
127 struct f30_data *f30 = dev_get_drvdata(&fn->dev); 128 struct f30_data *f30 = dev_get_drvdata(&fn->dev);
128 struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev); 129 struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
129 int error; 130 int error;
@@ -134,7 +135,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
134 if (drvdata->attn_data.size < f30->register_count) { 135 if (drvdata->attn_data.size < f30->register_count) {
135 dev_warn(&fn->dev, 136 dev_warn(&fn->dev,
136 "F30 interrupted, but data is missing\n"); 137 "F30 interrupted, but data is missing\n");
137 return 0; 138 return IRQ_HANDLED;
138 } 139 }
139 memcpy(f30->data_regs, drvdata->attn_data.data, 140 memcpy(f30->data_regs, drvdata->attn_data.data,
140 f30->register_count); 141 f30->register_count);
@@ -147,7 +148,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
147 dev_err(&fn->dev, 148 dev_err(&fn->dev,
148 "%s: Failed to read F30 data registers: %d\n", 149 "%s: Failed to read F30 data registers: %d\n",
149 __func__, error); 150 __func__, error);
150 return error; 151 return IRQ_RETVAL(error);
151 } 152 }
152 } 153 }
153 154
@@ -159,7 +160,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
159 rmi_f03_commit_buttons(f30->f03); 160 rmi_f03_commit_buttons(f30->f03);
160 } 161 }
161 162
162 return 0; 163 return IRQ_HANDLED;
163} 164}
164 165
165static int rmi_f30_config(struct rmi_function *fn) 166static int rmi_f30_config(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index f1f5ac539d5d..87a7d4ba382d 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -100,8 +100,9 @@ static int rmi_f34_command(struct f34_data *f34, u8 command,
100 return 0; 100 return 0;
101} 101}
102 102
103static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits) 103static irqreturn_t rmi_f34_attention(int irq, void *ctx)
104{ 104{
105 struct rmi_function *fn = ctx;
105 struct f34_data *f34 = dev_get_drvdata(&fn->dev); 106 struct f34_data *f34 = dev_get_drvdata(&fn->dev);
106 int ret; 107 int ret;
107 u8 status; 108 u8 status;
@@ -126,7 +127,7 @@ static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
126 complete(&f34->v7.cmd_done); 127 complete(&f34->v7.cmd_done);
127 } 128 }
128 129
129 return 0; 130 return IRQ_HANDLED;
130} 131}
131 132
132static int rmi_f34_write_blocks(struct f34_data *f34, const void *data, 133static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index e8a59d164019..a6f515bcab22 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -610,11 +610,6 @@ error:
610 mutex_unlock(&f54->data_mutex); 610 mutex_unlock(&f54->data_mutex);
611} 611}
612 612
613static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits)
614{
615 return 0;
616}
617
618static int rmi_f54_config(struct rmi_function *fn) 613static int rmi_f54_config(struct rmi_function *fn)
619{ 614{
620 struct rmi_driver *drv = fn->rmi_dev->driver; 615 struct rmi_driver *drv = fn->rmi_dev->driver;
@@ -756,6 +751,5 @@ struct rmi_function_handler rmi_f54_handler = {
756 .func = 0x54, 751 .func = 0x54,
757 .probe = rmi_f54_probe, 752 .probe = rmi_f54_probe,
758 .config = rmi_f54_config, 753 .config = rmi_f54_config,
759 .attention = rmi_f54_attention,
760 .remove = rmi_f54_remove, 754 .remove = rmi_f54_remove,
761}; 755};
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index ff7043f74a3d..d196ac3d8b8c 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
603 { "GSL3692", 0 }, 603 { "GSL3692", 0 },
604 { "MSSL1680", 0 }, 604 { "MSSL1680", 0 },
605 { "MSSL0001", 0 }, 605 { "MSSL0001", 0 },
606 { "MSSL0002", 0 },
606 { } 607 { }
607}; 608};
608MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match); 609MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e055d228bfb9..689ffe538370 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,7 +142,6 @@ config DMAR_TABLE
142config INTEL_IOMMU 142config INTEL_IOMMU
143 bool "Support for Intel IOMMU using DMA Remapping Devices" 143 bool "Support for Intel IOMMU using DMA Remapping Devices"
144 depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC) 144 depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
145 select DMA_DIRECT_OPS
146 select IOMMU_API 145 select IOMMU_API
147 select IOMMU_IOVA 146 select IOMMU_IOVA
148 select NEED_DMA_MAP_STATE 147 select NEED_DMA_MAP_STATE
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 14e4b3722428..b344a883f116 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/dmar.h> 32#include <linux/dmar.h>
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/dma-direct.h>
35#include <linux/mempool.h> 34#include <linux/mempool.h>
36#include <linux/memory.h> 35#include <linux/memory.h>
37#include <linux/cpu.h> 36#include <linux/cpu.h>
@@ -3713,30 +3712,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
3713 dma_addr_t *dma_handle, gfp_t flags, 3712 dma_addr_t *dma_handle, gfp_t flags,
3714 unsigned long attrs) 3713 unsigned long attrs)
3715{ 3714{
3716 void *vaddr; 3715 struct page *page = NULL;
3716 int order;
3717 3717
3718 vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs); 3718 size = PAGE_ALIGN(size);
3719 if (iommu_no_mapping(dev) || !vaddr) 3719 order = get_order(size);
3720 return vaddr;
3721 3720
3722 *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr), 3721 if (!iommu_no_mapping(dev))
3723 PAGE_ALIGN(size), DMA_BIDIRECTIONAL, 3722 flags &= ~(GFP_DMA | GFP_DMA32);
3724 dev->coherent_dma_mask); 3723 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3725 if (!*dma_handle) 3724 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3726 goto out_free_pages; 3725 flags |= GFP_DMA;
3727 return vaddr; 3726 else
3727 flags |= GFP_DMA32;
3728 }
3729
3730 if (gfpflags_allow_blocking(flags)) {
3731 unsigned int count = size >> PAGE_SHIFT;
3732
3733 page = dma_alloc_from_contiguous(dev, count, order, flags);
3734 if (page && iommu_no_mapping(dev) &&
3735 page_to_phys(page) + size > dev->coherent_dma_mask) {
3736 dma_release_from_contiguous(dev, page, count);
3737 page = NULL;
3738 }
3739 }
3740
3741 if (!page)
3742 page = alloc_pages(flags, order);
3743 if (!page)
3744 return NULL;
3745 memset(page_address(page), 0, size);
3746
3747 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3748 DMA_BIDIRECTIONAL,
3749 dev->coherent_dma_mask);
3750 if (*dma_handle)
3751 return page_address(page);
3752 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3753 __free_pages(page, order);
3728 3754
3729out_free_pages:
3730 dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
3731 return NULL; 3755 return NULL;
3732} 3756}
3733 3757
3734static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, 3758static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3735 dma_addr_t dma_handle, unsigned long attrs) 3759 dma_addr_t dma_handle, unsigned long attrs)
3736{ 3760{
3737 if (!iommu_no_mapping(dev)) 3761 int order;
3738 intel_unmap(dev, dma_handle, PAGE_ALIGN(size)); 3762 struct page *page = virt_to_page(vaddr);
3739 dma_direct_free(dev, size, vaddr, dma_handle, attrs); 3763
3764 size = PAGE_ALIGN(size);
3765 order = get_order(size);
3766
3767 intel_unmap(dev, dma_handle, size);
3768 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3769 __free_pages(page, order);
3740} 3770}
3741 3771
3742static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, 3772static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
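
The intel_alloc_coherent() rework above reverts to open-coded page allocation instead of routing through dma_direct_alloc(): try the CMA area first for blocking allocations, fall back to the buddy allocator, zero the memory, then create the IOMMU mapping; the free side mirrors it with dma_release_from_contiguous() falling back to __free_pages(). A compressed sketch of that allocate-with-fallback pattern (kernel context assumed, names illustrative):

#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static struct page *coherent_pages_alloc(struct device *dev, size_t size,
					 gfp_t flags)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int order = get_order(size);
	struct page *page = NULL;

	if (gfpflags_allow_blocking(flags))	/* CMA may sleep */
		page = dma_alloc_from_contiguous(dev, count, order, flags);
	if (!page)
		page = alloc_pages(flags, order);
	if (page)
		memset(page_address(page), 0, PAGE_ALIGN(size));

	return page;
}

static void coherent_pages_free(struct device *dev, struct page *page,
				size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* returns false if the pages did not come from the CMA area */
	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}
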
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 0f52d44b3f69..f5fe0100f9ff 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -199,7 +199,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
199 199
200fail: 200fail:
201 irq_domain_free_irqs_parent(domain, virq, nr_irqs); 201 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
202 gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs)); 202 gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
203 return err; 203 return err;
204} 204}
205 205
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5377d7e2afba..d7842d312d3e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -182,6 +182,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
182 return its->collections + its_dev->event_map.col_map[event]; 182 return its->collections + its_dev->event_map.col_map[event];
183} 183}
184 184
185static struct its_collection *valid_col(struct its_collection *col)
186{
187 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
188 return NULL;
189
190 return col;
191}
192
193static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
194{
195 if (valid_col(its->collections + vpe->col_idx))
196 return vpe;
197
198 return NULL;
199}
200
185/* 201/*
186 * ITS command descriptors - parameters to be encoded in a command 202 * ITS command descriptors - parameters to be encoded in a command
187 * block. 203 * block.
@@ -439,7 +455,7 @@ static struct its_collection *its_build_mapti_cmd(struct its_node *its,
439 455
440 its_fixup_cmd(cmd); 456 its_fixup_cmd(cmd);
441 457
442 return col; 458 return valid_col(col);
443} 459}
444 460
445static struct its_collection *its_build_movi_cmd(struct its_node *its, 461static struct its_collection *its_build_movi_cmd(struct its_node *its,
@@ -458,7 +474,7 @@ static struct its_collection *its_build_movi_cmd(struct its_node *its,
458 474
459 its_fixup_cmd(cmd); 475 its_fixup_cmd(cmd);
460 476
461 return col; 477 return valid_col(col);
462} 478}
463 479
464static struct its_collection *its_build_discard_cmd(struct its_node *its, 480static struct its_collection *its_build_discard_cmd(struct its_node *its,
@@ -476,7 +492,7 @@ static struct its_collection *its_build_discard_cmd(struct its_node *its,
476 492
477 its_fixup_cmd(cmd); 493 its_fixup_cmd(cmd);
478 494
479 return col; 495 return valid_col(col);
480} 496}
481 497
482static struct its_collection *its_build_inv_cmd(struct its_node *its, 498static struct its_collection *its_build_inv_cmd(struct its_node *its,
@@ -494,7 +510,7 @@ static struct its_collection *its_build_inv_cmd(struct its_node *its,
494 510
495 its_fixup_cmd(cmd); 511 its_fixup_cmd(cmd);
496 512
497 return col; 513 return valid_col(col);
498} 514}
499 515
500static struct its_collection *its_build_int_cmd(struct its_node *its, 516static struct its_collection *its_build_int_cmd(struct its_node *its,
@@ -512,7 +528,7 @@ static struct its_collection *its_build_int_cmd(struct its_node *its,
512 528
513 its_fixup_cmd(cmd); 529 its_fixup_cmd(cmd);
514 530
515 return col; 531 return valid_col(col);
516} 532}
517 533
518static struct its_collection *its_build_clear_cmd(struct its_node *its, 534static struct its_collection *its_build_clear_cmd(struct its_node *its,
@@ -530,7 +546,7 @@ static struct its_collection *its_build_clear_cmd(struct its_node *its,
530 546
531 its_fixup_cmd(cmd); 547 its_fixup_cmd(cmd);
532 548
533 return col; 549 return valid_col(col);
534} 550}
535 551
536static struct its_collection *its_build_invall_cmd(struct its_node *its, 552static struct its_collection *its_build_invall_cmd(struct its_node *its,
@@ -554,7 +570,7 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
554 570
555 its_fixup_cmd(cmd); 571 its_fixup_cmd(cmd);
556 572
557 return desc->its_vinvall_cmd.vpe; 573 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
558} 574}
559 575
560static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, 576static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
@@ -576,7 +592,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
576 592
577 its_fixup_cmd(cmd); 593 its_fixup_cmd(cmd);
578 594
579 return desc->its_vmapp_cmd.vpe; 595 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
580} 596}
581 597
582static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, 598static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -599,7 +615,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
599 615
600 its_fixup_cmd(cmd); 616 its_fixup_cmd(cmd);
601 617
602 return desc->its_vmapti_cmd.vpe; 618 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
603} 619}
604 620
605static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, 621static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
@@ -622,7 +638,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
622 638
623 its_fixup_cmd(cmd); 639 its_fixup_cmd(cmd);
624 640
625 return desc->its_vmovi_cmd.vpe; 641 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
626} 642}
627 643
628static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, 644static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
@@ -640,7 +656,7 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
640 656
641 its_fixup_cmd(cmd); 657 its_fixup_cmd(cmd);
642 658
643 return desc->its_vmovp_cmd.vpe; 659 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
644} 660}
645 661
646static u64 its_cmd_ptr_to_offset(struct its_node *its, 662static u64 its_cmd_ptr_to_offset(struct its_node *its,
@@ -1824,11 +1840,16 @@ static int its_alloc_tables(struct its_node *its)
1824 1840
1825static int its_alloc_collections(struct its_node *its) 1841static int its_alloc_collections(struct its_node *its)
1826{ 1842{
1843 int i;
1844
1827 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), 1845 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
1828 GFP_KERNEL); 1846 GFP_KERNEL);
1829 if (!its->collections) 1847 if (!its->collections)
1830 return -ENOMEM; 1848 return -ENOMEM;
1831 1849
1850 for (i = 0; i < nr_cpu_ids; i++)
1851 its->collections[i].target_address = ~0ULL;
1852
1832 return 0; 1853 return 0;
1833} 1854}
1834 1855
@@ -2310,7 +2331,14 @@ static int its_irq_domain_activate(struct irq_domain *domain,
2310 cpu_mask = cpumask_of_node(its_dev->its->numa_node); 2331 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2311 2332
2312 /* Bind the LPI to the first possible CPU */ 2333 /* Bind the LPI to the first possible CPU */
2313 cpu = cpumask_first(cpu_mask); 2334 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2335 if (cpu >= nr_cpu_ids) {
2336 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2337 return -EINVAL;
2338
2339 cpu = cpumask_first(cpu_online_mask);
2340 }
2341
2314 its_dev->event_map.col_map[event] = cpu; 2342 its_dev->event_map.col_map[event] = cpu;
2315 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 2343 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2316 2344
@@ -3399,6 +3427,16 @@ static int redist_disable_lpis(void)
3399 u64 timeout = USEC_PER_SEC; 3427 u64 timeout = USEC_PER_SEC;
3400 u64 val; 3428 u64 val;
3401 3429
3430 /*
3431 * If coming via a CPU hotplug event, we don't need to disable
3432 * LPIs before trying to re-enable them. They are already
3433 * configured and all is well in the world. Detect this case
3434 * by checking the allocation of the pending table for the
3435 * current CPU.
3436 */
3437 if (gic_data_rdist()->pend_page)
3438 return 0;
3439
3402 if (!gic_rdists_supports_plpis()) { 3440 if (!gic_rdists_supports_plpis()) {
3403 pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); 3441 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3404 return -ENXIO; 3442 return -ENXIO;
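
Two of the ITS changes above work together: its_alloc_collections() now seeds every target_address with ~0ULL, and valid_col() refuses to return a collection whose low 16 bits are set, which can never happen for a real RDbase since redistributor base addresses are 64kB aligned. The sentinel therefore makes "never mapped" detectable at command-build time, before a SYNC is emitted against it. A stripped-down sketch of the idea (struct col stands in for struct its_collection):

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/types.h>

struct col {
	u64 target_address;
};

static void cols_init(struct col *cols, int n)
{
	int i;

	for (i = 0; i < n; i++)
		cols[i].target_address = ~0ULL;	/* poison: not yet mapped */
}

static struct col *valid_col(struct col *col)
{
	/* a mapped RDbase is 64kB aligned, so bits 15:0 must be clear */
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}
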
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 1ec3bfe56693..c671b3212010 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -93,8 +93,12 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
93 msg->address_lo = lower_32_bits(msi_data->msiir_addr); 93 msg->address_lo = lower_32_bits(msi_data->msiir_addr);
94 msg->data = data->hwirq; 94 msg->data = data->hwirq;
95 95
96 if (msi_affinity_flag) 96 if (msi_affinity_flag) {
97 msg->data |= cpumask_first(data->common->affinity); 97 const struct cpumask *mask;
98
99 mask = irq_data_get_effective_affinity_mask(data);
100 msg->data |= cpumask_first(mask);
101 }
98 102
99 iommu_dma_map_msi_msg(data->irq, msg); 103 iommu_dma_map_msi_msg(data->irq, msg);
100} 104}
@@ -121,7 +125,7 @@ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
121 return -EINVAL; 125 return -EINVAL;
122 } 126 }
123 127
124 cpumask_copy(irq_data->common->affinity, mask); 128 irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
125 129
126 return IRQ_SET_MASK_OK; 130 return IRQ_SET_MASK_OK;
127} 131}
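
The ls-scfg-msi fix above stops mutating irq_data->common->affinity (the user-requested mask) and switches to the effective-affinity API: .irq_set_affinity records the one CPU actually programmed via irq_data_update_effective_affinity(), and the message composer reads that same mask back. A minimal sketch of the handshake, with hypothetical my_* names:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/msi.h>

static int my_msi_set_affinity(struct irq_data *d,
			       const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_first(mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* record the CPU the interrupt was actually routed to */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static void my_msi_compose_msg(struct irq_data *d, struct msi_msg *msg)
{
	const struct cpumask *eff = irq_data_get_effective_affinity_mask(d);

	msg->data = d->hwirq;
	msg->data |= cpumask_first(eff);	/* effective, not requested */
}
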
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 98f90aadd141..18c0a1281914 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = {
588 .getname = data_sock_getname, 588 .getname = data_sock_getname,
589 .sendmsg = mISDN_sock_sendmsg, 589 .sendmsg = mISDN_sock_sendmsg,
590 .recvmsg = mISDN_sock_recvmsg, 590 .recvmsg = mISDN_sock_recvmsg,
591 .poll_mask = datagram_poll_mask, 591 .poll = datagram_poll,
592 .listen = sock_no_listen, 592 .listen = sock_no_listen,
593 .shutdown = sock_no_shutdown, 593 .shutdown = sock_no_shutdown,
594 .setsockopt = data_sock_setsockopt, 594 .setsockopt = data_sock_setsockopt,
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 10c08982185a..9c03f35d9df1 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig NVM 5menuconfig NVM
6 bool "Open-Channel SSD target support" 6 bool "Open-Channel SSD target support"
7 depends on BLOCK && HAS_DMA && PCI 7 depends on BLOCK && PCI
8 select BLK_DEV_NVME 8 select BLK_DEV_NVME
9 help 9 help
10 Say Y here to get to enable Open-channel SSDs. 10 Say Y here to get to enable Open-channel SSDs.
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index ab13fcec3fca..75df4c9d8b54 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
588} 588}
589 589
590/* Return md raid10 algorithm for @name */ 590/* Return md raid10 algorithm for @name */
591static const int raid10_name_to_format(const char *name) 591static int raid10_name_to_format(const char *name)
592{ 592{
593 if (!strcasecmp(name, "near")) 593 if (!strcasecmp(name, "near"))
594 return ALGORITHM_RAID10_NEAR; 594 return ALGORITHM_RAID10_NEAR;
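
The dm-raid tweak above drops a qualifier rather than behavior: `const` on a by-value return type is meaningless, since the caller receives a copy, and newer compilers warn about it under -Wignored-qualifiers. A const on a return only carries weight behind a pointer, as the two declarations from this file show:

static int raid10_name_to_format(const char *name);	   /* by value: const is noise */
static const char *raid10_md_layout_to_format(int layout); /* pointee: const matters */
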
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 938766794c2e..3d0e2c198f06 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
885static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, 885static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
886 sector_t start, sector_t len, void *data) 886 sector_t start, sector_t len, void *data)
887{ 887{
888 struct request_queue *q = bdev_get_queue(dev->bdev); 888 return bdev_dax_supported(dev->bdev, PAGE_SIZE);
889
890 return q && blk_queue_dax(q);
891} 889}
892 890
893static bool dm_table_supports_dax(struct dm_table *t) 891static bool dm_table_supports_dax(struct dm_table *t)
@@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1907 1905
1908 if (dm_table_supports_dax(t)) 1906 if (dm_table_supports_dax(t))
1909 blk_queue_flag_set(QUEUE_FLAG_DAX, q); 1907 blk_queue_flag_set(QUEUE_FLAG_DAX, q);
1908 else
1909 blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
1910
1910 if (dm_table_supports_dax_write_cache(t)) 1911 if (dm_table_supports_dax_write_cache(t))
1911 dax_write_cache(t->md->dax_dev, true); 1912 dax_write_cache(t->md->dax_dev, true);
1912 1913
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 36ef284ad086..72142021b5c9 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
776static int __commit_transaction(struct dm_pool_metadata *pmd) 776static int __commit_transaction(struct dm_pool_metadata *pmd)
777{ 777{
778 int r; 778 int r;
779 size_t metadata_len, data_len;
780 struct thin_disk_superblock *disk_super; 779 struct thin_disk_superblock *disk_super;
781 struct dm_block *sblock; 780 struct dm_block *sblock;
782 781
@@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
797 if (r < 0) 796 if (r < 0)
798 return r; 797 return r;
799 798
800 r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
801 if (r < 0)
802 return r;
803
804 r = dm_sm_root_size(pmd->data_sm, &data_len);
805 if (r < 0)
806 return r;
807
808 r = save_sm_roots(pmd); 799 r = save_sm_roots(pmd);
809 if (r < 0) 800 if (r < 0)
810 return r; 801 return r;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7945238df1c0..b900723bbd0f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1386,6 +1386,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1386 1386
1387static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); 1387static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1388 1388
1389static void requeue_bios(struct pool *pool);
1390
1389static void check_for_space(struct pool *pool) 1391static void check_for_space(struct pool *pool)
1390{ 1392{
1391 int r; 1393 int r;
@@ -1398,8 +1400,10 @@ static void check_for_space(struct pool *pool)
1398 if (r) 1400 if (r)
1399 return; 1401 return;
1400 1402
1401 if (nr_free) 1403 if (nr_free) {
1402 set_pool_mode(pool, PM_WRITE); 1404 set_pool_mode(pool, PM_WRITE);
1405 requeue_bios(pool);
1406 }
1403} 1407}
1404 1408
1405/* 1409/*
@@ -1476,7 +1480,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1476 1480
1477 r = dm_pool_alloc_data_block(pool->pmd, result); 1481 r = dm_pool_alloc_data_block(pool->pmd, result);
1478 if (r) { 1482 if (r) {
1479 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); 1483 if (r == -ENOSPC)
1484 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
1485 else
1486 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
1480 return r; 1487 return r;
1481 } 1488 }
1482 1489
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5961c7794ef3..07ea6a48aac6 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -259,7 +259,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
259 if (da != p) { 259 if (da != p) {
260 long i; 260 long i;
261 wc->memory_map = NULL; 261 wc->memory_map = NULL;
262 pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL); 262 pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
263 if (!pages) { 263 if (!pages) {
264 r = -ENOMEM; 264 r = -ENOMEM;
265 goto err2; 265 goto err2;
@@ -859,7 +859,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
859 859
860 if (wc->entries) 860 if (wc->entries)
861 return 0; 861 return 0;
862 wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks); 862 wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
863 if (!wc->entries) 863 if (!wc->entries)
864 return -ENOMEM; 864 return -ENOMEM;
865 for (b = 0; b < wc->n_blocks; b++) { 865 for (b = 0; b < wc->n_blocks; b++) {
@@ -1481,9 +1481,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
1481 wb->bio.bi_iter.bi_sector = read_original_sector(wc, e); 1481 wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
1482 wb->page_offset = PAGE_SIZE; 1482 wb->page_offset = PAGE_SIZE;
1483 if (max_pages <= WB_LIST_INLINE || 1483 if (max_pages <= WB_LIST_INLINE ||
1484 unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *), 1484 unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
1485 GFP_NOIO | __GFP_NORETRY | 1485 GFP_NOIO | __GFP_NORETRY |
1486 __GFP_NOMEMALLOC | __GFP_NOWARN)))) { 1486 __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
1487 wb->wc_list = wb->wc_list_inline; 1487 wb->wc_list = wb->wc_list_inline;
1488 max_pages = WB_LIST_INLINE; 1488 max_pages = WB_LIST_INLINE;
1489 } 1489 }
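
The dm-writecache conversions above all target the same integer-overflow class: a multiplication like p * sizeof(struct page *) can wrap before it reaches the allocator, yielding a too-small buffer. kmalloc_array()/kvmalloc_array() check the product internally, and array_size() saturates to SIZE_MAX so an oversized request fails cleanly; vmalloc() has no array variant, hence the explicit array_size() there. A small sketch (struct entry is a stand-in type):

#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct entry {
	u64 key, val;
};

static struct entry *entries_alloc(size_t n)
{
	/* array_size() returns SIZE_MAX on overflow, so vmalloc() fails */
	return vmalloc(array_size(n, sizeof(struct entry)));
}

static struct page **page_list_alloc(size_t n)
{
	/* the _array helpers perform the same overflow check internally */
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}
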
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 3c0e45f4dcf5..a44183ff4be0 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
787 787
788 /* Chunk BIO work */ 788 /* Chunk BIO work */
789 mutex_init(&dmz->chunk_lock); 789 mutex_init(&dmz->chunk_lock);
790 INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL); 790 INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
791 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 791 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
792 0, dev->name); 792 0, dev->name);
793 if (!dmz->chunk_wq) { 793 if (!dmz->chunk_wq) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e65429a29c06..b0dd7027848b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1056 if (len < 1) 1056 if (len < 1)
1057 goto out; 1057 goto out;
1058 nr_pages = min(len, nr_pages); 1058 nr_pages = min(len, nr_pages);
1059 if (ti->type->direct_access) 1059 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
1060 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
1061 1060
1062 out: 1061 out:
1063 dm_put_live_table(md, srcu_idx); 1062 dm_put_live_table(md, srcu_idx);
@@ -1606,10 +1605,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
1606 * the usage of io->orig_bio in dm_remap_zone_report() 1605 * the usage of io->orig_bio in dm_remap_zone_report()
1607 * won't be affected by this reassignment. 1606 * won't be affected by this reassignment.
1608 */ 1607 */
1609 struct bio *b = bio_clone_bioset(bio, GFP_NOIO, 1608 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
1610 &md->queue->bio_split); 1609 GFP_NOIO, &md->queue->bio_split);
1611 ci.io->orig_bio = b; 1610 ci.io->orig_bio = b;
1612 bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
1613 bio_chain(b, bio); 1611 bio_chain(b, bio);
1614 ret = generic_make_request(bio); 1612 ret = generic_make_request(bio);
1615 break; 1613 break;
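
The __split_and_process_bio() change above replaces a full clone plus a manual bio_advance() with bio_split(), which does both steps in one call: the returned bio covers the first N sectors and the original is advanced past them, so the clone and the iterator can no longer drift apart. The general split-and-chain shape, sketched:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void split_and_requeue(struct bio *bio, unsigned int sectors,
			      struct bio_set *bs)
{
	/* "head" holds the first @sectors; @bio now starts after them */
	struct bio *head = bio_split(bio, sectors, GFP_NOIO, bs);

	bio_chain(head, bio);		/* bio completes only after head does */
	generic_make_request(bio);	/* resubmit the remainder */
	/* ...continue processing "head" as the current work item... */
}
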
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 29b0cd9ec951..994aed2f9dff 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev)
5547 else 5547 else
5548 pr_warn("md: personality for level %s is not loaded!\n", 5548 pr_warn("md: personality for level %s is not loaded!\n",
5549 mddev->clevel); 5549 mddev->clevel);
5550 return -EINVAL; 5550 err = -EINVAL;
5551 goto abort;
5551 } 5552 }
5552 spin_unlock(&pers_lock); 5553 spin_unlock(&pers_lock);
5553 if (mddev->level != pers->level) { 5554 if (mddev->level != pers->level) {
@@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev)
5560 pers->start_reshape == NULL) { 5561 pers->start_reshape == NULL) {
5561 /* This personality cannot handle reshaping... */ 5562 /* This personality cannot handle reshaping... */
5562 module_put(pers->owner); 5563 module_put(pers->owner);
5563 return -EINVAL; 5564 err = -EINVAL;
5565 goto abort;
5564 } 5566 }
5565 5567
5566 if (pers->sync_request) { 5568 if (pers->sync_request) {
@@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev)
5629 mddev->private = NULL; 5631 mddev->private = NULL;
5630 module_put(pers->owner); 5632 module_put(pers->owner);
5631 bitmap_destroy(mddev); 5633 bitmap_destroy(mddev);
5632 return err; 5634 goto abort;
5633 } 5635 }
5634 if (mddev->queue) { 5636 if (mddev->queue) {
5635 bool nonrot = true; 5637 bool nonrot = true;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 478cf446827f..35bd3a62451b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev)
3893 disk->rdev->saved_raid_disk < 0) 3893 disk->rdev->saved_raid_disk < 0)
3894 conf->fullsync = 1; 3894 conf->fullsync = 1;
3895 } 3895 }
3896
3897 if (disk->replacement &&
3898 !test_bit(In_sync, &disk->replacement->flags) &&
3899 disk->replacement->saved_raid_disk < 0) {
3900 conf->fullsync = 1;
3901 }
3902
3896 disk->recovery_disabled = mddev->recovery_disabled - 1; 3903 disk->recovery_disabled = mddev->recovery_disabled - 1;
3897 } 3904 }
3898 3905
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 40826bba06b6..fcfab6635f9c 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
207 bpf_prog_array_free(rcdev->raw->progs); 207 bpf_prog_array_free(rcdev->raw->progs);
208} 208}
209 209
210int lirc_prog_attach(const union bpf_attr *attr) 210int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
211{ 211{
212 struct bpf_prog *prog;
213 struct rc_dev *rcdev; 212 struct rc_dev *rcdev;
214 int ret; 213 int ret;
215 214
216 if (attr->attach_flags) 215 if (attr->attach_flags)
217 return -EINVAL; 216 return -EINVAL;
218 217
219 prog = bpf_prog_get_type(attr->attach_bpf_fd,
220 BPF_PROG_TYPE_LIRC_MODE2);
221 if (IS_ERR(prog))
222 return PTR_ERR(prog);
223
224 rcdev = rc_dev_get_from_fd(attr->target_fd); 218 rcdev = rc_dev_get_from_fd(attr->target_fd);
225 if (IS_ERR(rcdev)) { 219 if (IS_ERR(rcdev))
226 bpf_prog_put(prog);
227 return PTR_ERR(rcdev); 220 return PTR_ERR(rcdev);
228 }
229 221
230 ret = lirc_bpf_attach(rcdev, prog); 222 ret = lirc_bpf_attach(rcdev, prog);
231 if (ret)
232 bpf_prog_put(prog);
233 223
234 put_device(&rcdev->dev); 224 put_device(&rcdev->dev);
235 225
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e05c3245930a..fa840666bdd1 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
507static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset) 507static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
508{ 508{
509 void __iomem *address = (void __iomem *)file->private_data; 509 void __iomem *address = (void __iomem *)file->private_data;
510 unsigned char *page;
511 int retval;
512 int len = 0; 510 int len = 0;
513 unsigned int value; 511 unsigned int value;
514 512 char lbuf[20];
515 if (*offset < 0)
516 return -EINVAL;
517 if (count == 0 || count > 1024)
518 return 0;
519 if (*offset != 0)
520 return 0;
521
522 page = (unsigned char *)__get_free_page(GFP_KERNEL);
523 if (!page)
524 return -ENOMEM;
525 513
526 value = readl(address); 514 value = readl(address);
527 len = sprintf(page, "%d\n", value); 515 len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
528
529 if (copy_to_user(buf, page, len)) {
530 retval = -EFAULT;
531 goto exit;
532 }
533 *offset += len;
534 retval = len;
535 516
536exit: 517 return simple_read_from_buffer(buf, count, offset, lbuf, len);
537 free_page((unsigned long)page);
538 return retval;
539} 518}
540 519
541static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset) 520static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
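
The ibmasmfs read path above shrinks from a hand-rolled page allocation, offset checks, and copy_to_user() into the simple_read_from_buffer() helper, which already handles offsets past EOF, short reads, and the user copy. The resulting idiom for any "format one value" file, sketched:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t one_value_read(struct file *file, char __user *buf,
			      size_t count, loff_t *ppos)
{
	char tmp[20];
	int len = scnprintf(tmp, sizeof(tmp), "%d\n", 42 /* the value */);

	/* handles *ppos > len, partial copies and -EFAULT for us */
	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}
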
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index b0b8f18a85e3..6649f0d56d2f 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -310,8 +310,11 @@ int mei_irq_read_handler(struct mei_device *dev,
310 if (&cl->link == &dev->file_list) { 310 if (&cl->link == &dev->file_list) {
311 /* A message for not connected fixed address clients 311 /* A message for not connected fixed address clients
312 * should be silently discarded 312 * should be silently discarded
313 * On power down client may be force cleaned,
314 * silently discard such messages
313 */ 315 */
314 if (hdr_is_fixed(mei_hdr)) { 316 if (hdr_is_fixed(mei_hdr) ||
317 dev->dev_state == MEI_DEV_POWER_DOWN) {
315 mei_irq_discard_msg(dev, mei_hdr); 318 mei_irq_discard_msg(dev, mei_hdr);
316 ret = 0; 319 ret = 0;
317 goto reset_slots; 320 goto reset_slots;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index efd733472a35..56c6f79a5c5a 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
467 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 467 unsigned int num_pages, bool is_2m_pages, unsigned int *target)
468{ 468{
469 unsigned long status; 469 unsigned long status;
470 unsigned long pfn = page_to_pfn(b->page); 470 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
471 471
472 STATS_INC(b->stats.lock[is_2m_pages]); 472 STATS_INC(b->stats.lock[is_2m_pages]);
473 473
@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
515 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 515 unsigned int num_pages, bool is_2m_pages, unsigned int *target)
516{ 516{
517 unsigned long status; 517 unsigned long status;
518 unsigned long pfn = page_to_pfn(b->page); 518 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
519 519
520 STATS_INC(b->stats.unlock[is_2m_pages]); 520 STATS_INC(b->stats.unlock[is_2m_pages]);
521 521
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index ef05e0039378..2a833686784b 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -27,8 +27,8 @@ struct mmc_gpio {
27 bool override_cd_active_level; 27 bool override_cd_active_level;
28 irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); 28 irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
29 char *ro_label; 29 char *ro_label;
30 char cd_label[0];
31 u32 cd_debounce_delay_ms; 30 u32 cd_debounce_delay_ms;
31 char cd_label[];
32}; 32};
33 33
34static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) 34static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
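
The slot-gpio struct reorder above is load-bearing: cd_label was declared as a zero-length array before cd_debounce_delay_ms, so the string written into it and the delay field occupied the same bytes. A C99 flexible array member is only valid as the final member, which the new layout restores; an illustrative (not the real) struct:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct labelled {
	u32 debounce_ms;
	char label[];			/* flexible array: must be last */
};

static struct labelled *labelled_new(const char *name)
{
	struct labelled *l = kzalloc(sizeof(*l) + strlen(name) + 1,
				     GFP_KERNEL);

	if (l)
		strcpy(l->label, name);	/* no longer clobbers debounce_ms */

	return l;
}
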
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 623f4d27fa01..80dc2fd6576c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
1065 * It's used when HS400 mode is enabled. 1065 * It's used when HS400 mode is enabled.
1066 */ 1066 */
1067 if (data->flags & MMC_DATA_WRITE && 1067 if (data->flags & MMC_DATA_WRITE &&
1068 !(host->timing != MMC_TIMING_MMC_HS400)) 1068 host->timing != MMC_TIMING_MMC_HS400)
1069 return; 1069 goto disable;
1070 1070
1071 if (data->flags & MMC_DATA_WRITE) 1071 if (data->flags & MMC_DATA_WRITE)
1072 enable = SDMMC_CARD_WR_THR_EN; 1072 enable = SDMMC_CARD_WR_THR_EN;
@@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
1074 enable = SDMMC_CARD_RD_THR_EN; 1074 enable = SDMMC_CARD_RD_THR_EN;
1075 1075
1076 if (host->timing != MMC_TIMING_MMC_HS200 && 1076 if (host->timing != MMC_TIMING_MMC_HS200 &&
1077 host->timing != MMC_TIMING_UHS_SDR104) 1077 host->timing != MMC_TIMING_UHS_SDR104 &&
1078 host->timing != MMC_TIMING_MMC_HS400)
1078 goto disable; 1079 goto disable;
1079 1080
1080 blksz_depth = blksz / (1 << host->data_shift); 1081 blksz_depth = blksz / (1 << host->data_shift);
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index f7f9773d161f..d032bd63444d 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -139,8 +139,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
139 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, 139 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
140 RST_RESERVED_BITS | val); 140 RST_RESERVED_BITS | val);
141 141
142 if (host->data && host->data->flags & MMC_DATA_READ) 142 clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
143 clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
144 143
145 renesas_sdhi_internal_dmac_enable_dma(host, true); 144 renesas_sdhi_internal_dmac_enable_dma(host, true);
146} 145}
@@ -164,17 +163,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
164 goto force_pio; 163 goto force_pio;
165 164
166 /* This DMAC cannot handle if buffer is not 8-bytes alignment */ 165 /* This DMAC cannot handle if buffer is not 8-bytes alignment */
167 if (!IS_ALIGNED(sg_dma_address(sg), 8)) { 166 if (!IS_ALIGNED(sg_dma_address(sg), 8))
168 dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, 167 goto force_pio_with_unmap;
169 mmc_get_dma_dir(data));
170 goto force_pio;
171 }
172 168
173 if (data->flags & MMC_DATA_READ) { 169 if (data->flags & MMC_DATA_READ) {
174 dtran_mode |= DTRAN_MODE_CH_NUM_CH1; 170 dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
175 if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && 171 if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
176 test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) 172 test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
177 goto force_pio; 173 goto force_pio_with_unmap;
178 } else { 174 } else {
179 dtran_mode |= DTRAN_MODE_CH_NUM_CH0; 175 dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
180 } 176 }
@@ -189,6 +185,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
189 185
190 return; 186 return;
191 187
188force_pio_with_unmap:
189 dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data));
190
192force_pio: 191force_pio:
193 host->force_pio = true; 192 host->force_pio = true;
194 renesas_sdhi_internal_dmac_enable_dma(host, false); 193 renesas_sdhi_internal_dmac_enable_dma(host, false);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d6aef70d34fa..4eb3d29ecde1 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -312,6 +312,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
312 312
313 if (imx_data->socdata->flags & ESDHC_FLAG_HS400) 313 if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
314 val |= SDHCI_SUPPORT_HS400; 314 val |= SDHCI_SUPPORT_HS400;
315
316 /*
317 * Do not advertise faster UHS modes if there are no
318 * pinctrl states for 100MHz/200MHz.
319 */
320 if (IS_ERR_OR_NULL(imx_data->pins_100mhz) ||
321 IS_ERR_OR_NULL(imx_data->pins_200mhz))
322 val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50
323 | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
315 } 324 }
316 } 325 }
317 326
@@ -1158,18 +1167,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
1158 ESDHC_PINCTRL_STATE_100MHZ); 1167 ESDHC_PINCTRL_STATE_100MHZ);
1159 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, 1168 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
1160 ESDHC_PINCTRL_STATE_200MHZ); 1169 ESDHC_PINCTRL_STATE_200MHZ);
1161 if (IS_ERR(imx_data->pins_100mhz) ||
1162 IS_ERR(imx_data->pins_200mhz)) {
1163 dev_warn(mmc_dev(host->mmc),
1164 "could not get ultra high speed state, work on normal mode\n");
1165 /*
1166 * fall back to not supporting uhs by specifying no
1167 * 1.8v quirk
1168 */
1169 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1170 }
1171 } else {
1172 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1173 } 1170 }
1174 1171
1175 /* call to generic mmc_of_parse to support additional capabilities */ 1172 /* call to generic mmc_of_parse to support additional capabilities */
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index e7472590f2ed..8e7f3e35ee3d 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1446,6 +1446,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev)
1446 sunxi_mmc_init_host(host); 1446 sunxi_mmc_init_host(host);
1447 sunxi_mmc_set_bus_width(host, mmc->ios.bus_width); 1447 sunxi_mmc_set_bus_width(host, mmc->ios.bus_width);
1448 sunxi_mmc_set_clk(host, &mmc->ios); 1448 sunxi_mmc_set_clk(host, &mmc->ios);
1449 enable_irq(host->irq);
1449 1450
1450 return 0; 1451 return 0;
1451} 1452}
@@ -1455,6 +1456,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
1455 struct mmc_host *mmc = dev_get_drvdata(dev); 1456 struct mmc_host *mmc = dev_get_drvdata(dev);
1456 struct sunxi_mmc_host *host = mmc_priv(mmc); 1457 struct sunxi_mmc_host *host = mmc_priv(mmc);
1457 1458
1459 /*
1460 * When clocks are off, it's possible receiving
1461 * fake interrupts, which will stall the system.
1462 * Disabling the irq will prevent this.
1463 */
1464 disable_irq(host->irq);
1458 sunxi_mmc_reset_host(host); 1465 sunxi_mmc_reset_host(host);
1459 sunxi_mmc_disable(host); 1466 sunxi_mmc_disable(host);
1460 1467
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index a0c655628d6d..1b64ac8c5bc8 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -2526,7 +2526,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2526 2526
2527struct ppb_lock { 2527struct ppb_lock {
2528 struct flchip *chip; 2528 struct flchip *chip;
2529 loff_t offset; 2529 unsigned long adr;
2530 int locked; 2530 int locked;
2531}; 2531};
2532 2532
@@ -2544,8 +2544,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2544 unsigned long timeo; 2544 unsigned long timeo;
2545 int ret; 2545 int ret;
2546 2546
2547 adr += chip->start;
2547 mutex_lock(&chip->mutex); 2548 mutex_lock(&chip->mutex);
2548 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2549 ret = get_chip(map, chip, adr, FL_LOCKING);
2549 if (ret) { 2550 if (ret) {
2550 mutex_unlock(&chip->mutex); 2551 mutex_unlock(&chip->mutex);
2551 return ret; 2552 return ret;
@@ -2563,8 +2564,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2563 2564
2564 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2565 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2565 chip->state = FL_LOCKING; 2566 chip->state = FL_LOCKING;
2566 map_write(map, CMD(0xA0), chip->start + adr); 2567 map_write(map, CMD(0xA0), adr);
2567 map_write(map, CMD(0x00), chip->start + adr); 2568 map_write(map, CMD(0x00), adr);
2568 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2569 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2569 /* 2570 /*
2570 * Unlocking of one specific sector is not supported, so we 2571 * Unlocking of one specific sector is not supported, so we
@@ -2602,7 +2603,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2602 map_write(map, CMD(0x00), chip->start); 2603 map_write(map, CMD(0x00), chip->start);
2603 2604
2604 chip->state = FL_READY; 2605 chip->state = FL_READY;
2605 put_chip(map, chip, adr + chip->start); 2606 put_chip(map, chip, adr);
2606 mutex_unlock(&chip->mutex); 2607 mutex_unlock(&chip->mutex);
2607 2608
2608 return ret; 2609 return ret;
@@ -2659,9 +2660,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2659 * sectors shall be unlocked, so lets keep their locking 2660 * sectors shall be unlocked, so lets keep their locking
2660 * status at "unlocked" (locked=0) for the final re-locking. 2661 * status at "unlocked" (locked=0) for the final re-locking.
2661 */ 2662 */
2662 if ((adr < ofs) || (adr >= (ofs + len))) { 2663 if ((offset < ofs) || (offset >= (ofs + len))) {
2663 sect[sectors].chip = &cfi->chips[chipnum]; 2664 sect[sectors].chip = &cfi->chips[chipnum];
2664 sect[sectors].offset = offset; 2665 sect[sectors].adr = adr;
2665 sect[sectors].locked = do_ppb_xxlock( 2666 sect[sectors].locked = do_ppb_xxlock(
2666 map, &cfi->chips[chipnum], adr, 0, 2667 map, &cfi->chips[chipnum], adr, 0,
2667 DO_XXLOCK_ONEBLOCK_GETLOCK); 2668 DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2675,6 +2676,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2675 i++; 2676 i++;
2676 2677
2677 if (adr >> cfi->chipshift) { 2678 if (adr >> cfi->chipshift) {
2679 if (offset >= (ofs + len))
2680 break;
2678 adr = 0; 2681 adr = 0;
2679 chipnum++; 2682 chipnum++;
2680 2683
@@ -2705,7 +2708,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2705 */ 2708 */
2706 for (i = 0; i < sectors; i++) { 2709 for (i = 0; i < sectors; i++) {
2707 if (sect[i].locked) 2710 if (sect[i].locked)
2708 do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, 2711 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2709 DO_XXLOCK_ONEBLOCK_LOCK); 2712 DO_XXLOCK_ONEBLOCK_LOCK);
2710 } 2713 }
2711 2714
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 3a6f450d1093..53febe8a68c3 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -733,8 +733,8 @@ static struct flash_info dataflash_data[] = {
733 { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS}, 733 { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
734 { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS}, 734 { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
735 735
736 { "AT45DB641E", 0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS}, 736 { "AT45DB641E", 0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
737 { "at45db641e", 0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS}, 737 { "at45db641e", 0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
738}; 738};
739 739
740static struct flash_info *jedec_lookup(struct spi_device *spi, 740static struct flash_info *jedec_lookup(struct spi_device *spi,
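
The dataflash table fix above is about constant typing on 32-bit builds: 0x1f28000100 needs more than 32 bits, and while modern C promotes an unsuffixed constant to the first type that fits, older compilers following C90 rules stop at unsigned long and warn that the constant is too large for its type. The ULL suffix makes the 64-bit type explicit and portable:

#include <linux/types.h>

struct flash_id {
	u64 jedec_id;
};

/* unsuffixed 0x1f28000100 can trigger "constant too large" on 32-bit */
static const struct flash_id at45db641e = { .jedec_id = 0x1f28000100ULL };
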
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index cfd33e6ca77f..5869e90cc14b 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev)
123 if (ret) 123 if (ret)
124 return ret; 124 return ret;
125 125
126 denali->clk_x_rate = clk_get_rate(dt->clk); 126 /*
127 * Hardcode the clock rate for the backward compatibility.
128 * This works for both SOCFPGA and UniPhier.
129 */
130 denali->clk_x_rate = 200000000;
127 131
128 ret = denali_init(denali); 132 ret = denali_init(denali);
129 if (ret) 133 if (ret)
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 45786e707b7b..26cef218bb43 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -48,7 +48,7 @@
48#define NFC_V1_V2_CONFIG (host->regs + 0x0a) 48#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
49#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c) 49#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
50#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e) 50#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
51#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10) 51#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
52#define NFC_V1_V2_WRPROT (host->regs + 0x12) 52#define NFC_V1_V2_WRPROT (host->regs + 0x12)
53#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) 53#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
54#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) 54#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
@@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
1274 writew(config1, NFC_V1_V2_CONFIG1); 1274 writew(config1, NFC_V1_V2_CONFIG1);
1275 /* preset operation */ 1275 /* preset operation */
1276 1276
1277 /* spare area size in 16-bit half-words */
1278 writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
1279
1277 /* Unlock the internal RAM Buffer */ 1280 /* Unlock the internal RAM Buffer */
1278 writew(0x2, NFC_V1_V2_CONFIG); 1281 writew(0x2, NFC_V1_V2_CONFIG);
1279 1282
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 10c4f9919850..b01d15ec4c56 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
440 440
441 for (; page < page_end; page++) { 441 for (; page < page_end; page++) {
442 res = chip->ecc.read_oob(mtd, chip, page); 442 res = chip->ecc.read_oob(mtd, chip, page);
443 if (res) 443 if (res < 0)
444 return res; 444 return res;
445 445
446 bad = chip->oob_poi[chip->badblockpos]; 446 bad = chip->oob_poi[chip->badblockpos];
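
The one-character nand_base change above encodes a convention worth spelling out: ECC read paths return a negative errno on failure but may return a positive count of corrected bitflips on success, so `if (res)` was treating successfully corrected reads as bad blocks. Only `res < 0` is an error; sketched:

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>

static int read_oob_checked(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	int res = chip->ecc.read_oob(mtd, chip, page);

	if (res < 0)		/* genuine I/O or uncorrectable ECC error */
		return res;

	/* res > 0 means bitflips were corrected; the data is still valid */
	return 0;
}
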
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 7ed1f87e742a..49c546c97c6f 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -17,23 +17,47 @@
17 17
18#include <linux/mtd/rawnand.h> 18#include <linux/mtd/rawnand.h>
19 19
20/*
21 * Macronix AC series does not support using SET/GET_FEATURES to change
22 * the timings unlike what is declared in the parameter page. Unflag
23 * this feature to avoid unnecessary downturns.
24 */
25static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
26{
27 unsigned int i;
28 static const char * const broken_get_timings[] = {
29 "MX30LF1G18AC",
30 "MX30LF1G28AC",
31 "MX30LF2G18AC",
32 "MX30LF2G28AC",
33 "MX30LF4G18AC",
34 "MX30LF4G28AC",
35 "MX60LF8G18AC",
36 };
37
38 if (!chip->parameters.supports_set_get_features)
39 return;
40
41 for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
42 if (!strcmp(broken_get_timings[i], chip->parameters.model))
43 break;
44 }
45
46 if (i == ARRAY_SIZE(broken_get_timings))
47 return;
48
49 bitmap_clear(chip->parameters.get_feature_list,
50 ONFI_FEATURE_ADDR_TIMING_MODE, 1);
51 bitmap_clear(chip->parameters.set_feature_list,
52 ONFI_FEATURE_ADDR_TIMING_MODE, 1);
53}
54
20static int macronix_nand_init(struct nand_chip *chip) 55static int macronix_nand_init(struct nand_chip *chip)
21{ 56{
22 if (nand_is_slc(chip)) 57 if (nand_is_slc(chip))
23 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; 58 chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
24 59
25 /* 60 macronix_nand_fix_broken_get_timings(chip);
26 * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change
27 * the timings unlike what is declared in the parameter page. Unflag
28 * this feature to avoid unnecessary downturns.
29 */
30 if (chip->parameters.supports_set_get_features &&
31 !strcmp("MX30LF2G18AC", chip->parameters.model)) {
32 bitmap_clear(chip->parameters.get_feature_list,
33 ONFI_FEATURE_ADDR_TIMING_MODE, 1);
34 bitmap_clear(chip->parameters.set_feature_list,
35 ONFI_FEATURE_ADDR_TIMING_MODE, 1);
36 }
37 61
38 return 0; 62 return 0;
39} 63}
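
The Macronix rework above replaces a single hard-coded strcmp() with a string table, so all seven sibling parts with the same broken GET/SET_FEATURES timing support are covered by one loop and the next affected model becomes a one-line table entry. The lookup pattern in isolation (table excerpted):

#include <linux/kernel.h>
#include <linux/string.h>

static bool model_needs_quirk(const char *model)
{
	static const char * const quirky[] = {
		"MX30LF1G18AC", "MX30LF2G18AC", "MX60LF8G18AC",
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(quirky); i++)
		if (!strcmp(quirky[i], model))
			return true;

	return false;
}
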
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 0af45b134c0c..5ec4c90a637d 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
66 66
67 if (p->supports_set_get_features) { 67 if (p->supports_set_get_features) {
68 set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list); 68 set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
69 set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
69 set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list); 70 set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
71 set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
70 } 72 }
71 73
72 return 0; 74 return 0;
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index c3f7aaa5d18f..d7e10b36a0b9 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -926,10 +926,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
926 if (ret) 926 if (ret)
927 return ret; 927 return ret;
928 928
929 if (f_pdata->use_direct_mode) 929 if (f_pdata->use_direct_mode) {
930 memcpy_toio(cqspi->ahb_base + to, buf, len); 930 memcpy_toio(cqspi->ahb_base + to, buf, len);
931 else 931 ret = cqspi_wait_idle(cqspi);
932 } else {
932 ret = cqspi_indirect_write_execute(nor, to, buf, len); 933 ret = cqspi_indirect_write_execute(nor, to, buf, len);
934 }
933 if (ret) 935 if (ret)
934 return ret; 936 return ret;
935 937
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index d5c15e8bb3de..f273af136fc7 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -173,7 +173,7 @@ config SUNLANCE
173 173
174config AMD_XGBE 174config AMD_XGBE
175 tristate "AMD 10GbE Ethernet driver" 175 tristate "AMD 10GbE Ethernet driver"
176 depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA 176 depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
177 depends on X86 || ARM64 || COMPILE_TEST 177 depends on X86 || ARM64 || COMPILE_TEST
178 select BITREVERSE 178 select BITREVERSE
179 select CRC32 179 select CRC32
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
index 1205861b6318..eedd3f3dd22e 100644
--- a/drivers/net/ethernet/apm/xgene-v2/Kconfig
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -1,6 +1,5 @@
 config NET_XGENE_V2
         tristate "APM X-Gene SoC Ethernet-v2 Driver"
-        depends on HAS_DMA
         depends on ARCH_XGENE || COMPILE_TEST
         help
           This is the Ethernet driver for the on-chip ethernet interface
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index afccb033177b..e4e33c900b57 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,6 +1,5 @@
 config NET_XGENE
         tristate "APM X-Gene SoC Ethernet Driver"
-        depends on HAS_DMA
         depends on ARCH_XGENE || COMPILE_TEST
         select PHYLIB
         select MDIO_XGENE
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index e743ddf46343..5d0ab8e74b68 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -24,7 +24,8 @@ config ARC_EMAC_CORE
 config ARC_EMAC
         tristate "ARC EMAC support"
         select ARC_EMAC_CORE
-        depends on OF_IRQ && OF_NET && HAS_DMA && (ARC || COMPILE_TEST)
+        depends on OF_IRQ && OF_NET
+        depends on ARC || COMPILE_TEST
         ---help---
           On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
           non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -33,7 +34,8 @@ config ARC_EMAC
 config EMAC_ROCKCHIP
         tristate "Rockchip EMAC support"
         select ARC_EMAC_CORE
-        depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA && (ARCH_ROCKCHIP || COMPILE_TEST)
+        depends on OF_IRQ && OF_NET && REGULATOR
+        depends on ARCH_ROCKCHIP || COMPILE_TEST
         ---help---
           Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
           This selects Rockchip SoC glue layer support for the
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 567ee54504bc..5e5022fa1d04 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
         struct pci_dev *pdev = to_pci_dev(dev);
         struct alx_priv *alx = pci_get_drvdata(pdev);
         struct alx_hw *hw = &alx->hw;
+        int err;
 
         alx_reset_phy(hw);
 
         if (!netif_running(alx->dev))
                 return 0;
         netif_device_attach(alx->dev);
-        return __alx_open(alx, true);
+
+        rtnl_lock();
+        err = __alx_open(alx, true);
+        rtnl_unlock();
+
+        return err;
 }
 
 static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index af75156919ed..4c3bfde6e8de 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -157,7 +157,6 @@ config BGMAC
 config BGMAC_BCMA
         tristate "Broadcom iProc GBit BCMA support"
         depends on BCMA && BCMA_HOST_SOC
-        depends on HAS_DMA
         depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
         select BGMAC
         select PHYLIB
@@ -170,7 +169,6 @@ config BGMAC_BCMA
 
 config BGMAC_PLATFORM
         tristate "Broadcom iProc GBit platform support"
-        depends on HAS_DMA
         depends on ARCH_BCM_IPROC || COMPILE_TEST
         depends on OF
         select BGMAC
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d847e1b9c37b..be1506169076 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1533,6 +1533,7 @@ struct bnx2x {
         struct link_vars        link_vars;
         u32                     link_cnt;
         struct bnx2x_link_report_data last_reported_link;
+        bool                    force_link_down;
 
         struct mdio_if_info     mdio;
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8cd73ff5debc..af7b5a4d8ba0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
 {
         struct bnx2x_link_report_data cur_data;
 
+        if (bp->force_link_down) {
+                bp->link_vars.link_up = 0;
+                return;
+        }
+
         /* reread mf_cfg */
         if (IS_PF(bp) && !CHIP_IS_E1(bp))
                 bnx2x_read_mf_cfg(bp);
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                 bp->pending_max = 0;
         }
 
+        bp->force_link_down = false;
         if (bp->port.pmf) {
                 rc = bnx2x_initial_phy_init(bp, load_mode);
                 if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5b1ed240bf18..57348f2b49a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
         bp->sp_rtnl_state = 0;
         smp_mb();
 
+        /* Immediately indicate link as down */
+        bp->link_vars.link_up = 0;
+        bp->force_link_down = true;
+        netif_carrier_off(bp->dev);
+        BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
         bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
         /* When ret value shows failure of allocation failure,
          * the nic is rebooted again. If open still fails, a error
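Taken together, the bnx2x.h, bnx2x_cmn.c and bnx2x_main.c hunks implement a small latch: a Tx timeout marks the link administratively down, the report path obeys the latch, and the next load clears it. A userspace model of that lifecycle, assuming the field names from the patch and nothing else:

    #include <stdbool.h>
    #include <stdio.h>

    struct nic {
            bool link_up;
            bool force_link_down;
    };

    static void link_report(struct nic *bp)
    {
            if (bp->force_link_down)
                    bp->link_up = false;    /* stay down until reloaded */
            printf("link %s\n", bp->link_up ? "up" : "down");
    }

    static void tx_timeout(struct nic *bp)
    {
            bp->link_up = false;
            bp->force_link_down = true;     /* latch across the reload */
            link_report(bp);
    }

    static void nic_load(struct nic *bp)
    {
            bp->force_link_down = false;    /* reload re-arms normal reporting */
            bp->link_up = true;
            link_report(bp);
    }

    int main(void)
    {
            struct nic bp = { .link_up = true };

            tx_timeout(&bp);        /* -> link down */
            nic_load(&bp);          /* -> link up again */
            return 0;
    }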
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 30273a7717e2..4fd829b5e65d 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
         id_tbl->max = size;
         id_tbl->next = next;
         spin_lock_init(&id_tbl->lock);
-        id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+        id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
         if (!id_tbl->table)
                 return -ENOMEM;
 
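The cnic fix sizes a bitmap in longs rather than in 32-bit words, which is what the kernel's long-based bitmap operations expect on 64-bit machines. A userspace equivalent, with BITS_TO_LONGS() rebuilt (in the kernel it comes from linux/bitops.h):

    #include <stdio.h>
    #include <stdlib.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
    #define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            unsigned int size = 1000;       /* number of IDs to track */
            unsigned long *table;

            /* was: calloc(DIV_ROUND_UP(size, 32), 4) -- 4 bytes per word
             * even when the bitmap ops operate on 8-byte longs */
            table = calloc(BITS_TO_LONGS(size), sizeof(long));
            if (!table)
                    return 1;

            printf("%u bits -> %zu longs (%zu bytes)\n", size,
                   (size_t)BITS_TO_LONGS(size),
                   BITS_TO_LONGS(size) * sizeof(long));
            free(table);
            return 0;
    }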
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 3e93df5d4e3b..96cc03a6d942 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3726,6 +3726,8 @@ static int at91ether_init(struct platform_device *pdev)
         int err;
         u32 reg;
 
+        bp->queues[0].bp = bp;
+
         dev->netdev_ops = &at91ether_netdev_ops;
         dev->ethtool_ops = &macb_ethtool_ops;
 
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 2220c771092b..678835136bf8 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 
         if (delta > TSU_NSEC_MAX_VAL) {
                 gem_tsu_get_time(&bp->ptp_clock_info, &now);
-                if (sign)
-                        now = timespec64_sub(now, then);
-                else
-                        now = timespec64_add(now, then);
+                now = timespec64_add(now, then);
 
                 gem_tsu_set_time(&bp->ptp_clock_info,
                                  (const struct timespec64 *)&now);
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index 07d2201530d2..9fdd496b90ff 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,6 @@
 config NET_CALXEDA_XGMAC
         tristate "Calxeda 1G/10G XGMAC Ethernet driver"
-        depends on HAS_IOMEM && HAS_DMA
+        depends on HAS_IOMEM
         depends on ARCH_HIGHBANK || COMPILE_TEST
         select CRC32
         help
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index dd04a2f89ce6..bc03c175a3cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -263,7 +263,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
263 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n", 263 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
264 enable ? "set" : "unset", pi->port_id, i, -err); 264 enable ? "set" : "unset", pi->port_id, i, -err);
265 else 265 else
266 txq->dcb_prio = value; 266 txq->dcb_prio = enable ? value : 0;
267 } 267 }
268} 268}
269 269
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 973c1fb70d09..99038dfc7fbe 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
         enic->rfs_h.max = enic->config.num_arfs;
         enic->rfs_h.free = enic->rfs_h.max;
         enic->rfs_h.toclean = 0;
-        enic_rfs_timer_start(enic);
 }
 
 void enic_rfs_flw_tbl_free(struct enic *enic)
@@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
 
         enic_rfs_timer_stop(enic);
         spin_lock_bh(&enic->rfs_h.lock);
-        enic->rfs_h.free = 0;
         for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
                 struct hlist_head *hhead;
                 struct hlist_node *tmp;
@@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
                         enic_delfltr(enic, n->fltr_id);
                         hlist_del(&n->node);
                         kfree(n);
+                        enic->rfs_h.free++;
                 }
         }
         spin_unlock_bh(&enic->rfs_h.lock);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 30d2eaa18c04..90c645b8538e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1920,7 +1920,7 @@ static int enic_open(struct net_device *netdev)
 {
         struct enic *enic = netdev_priv(netdev);
         unsigned int i;
-        int err;
+        int err, ret;
 
         err = enic_request_intr(enic);
         if (err) {
@@ -1971,16 +1971,15 @@ static int enic_open(struct net_device *netdev)
                 vnic_intr_unmask(&enic->intr[i]);
 
         enic_notify_timer_start(enic);
-        enic_rfs_flw_tbl_init(enic);
+        enic_rfs_timer_start(enic);
 
         return 0;
 
 err_out_free_rq:
         for (i = 0; i < enic->rq_count; i++) {
-                err = vnic_rq_disable(&enic->rq[i]);
-                if (err)
-                        return err;
-                vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+                ret = vnic_rq_disable(&enic->rq[i]);
+                if (!ret)
+                        vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
         }
         enic_dev_notify_unset(enic);
 err_out_free_intr:
@@ -2904,6 +2903,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         timer_setup(&enic->notify_timer, enic_notify_timer, 0);
 
+        enic_rfs_flw_tbl_init(enic);
         enic_set_rx_coal_setting(enic);
         INIT_WORK(&enic->reset, enic_reset);
         INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 78db8e62a83f..ed6c76d20b45 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1735,8 +1735,8 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
         if (unlikely(nd->state != ncsi_dev_state_functional))
                 return;
 
-        netdev_info(nd->dev, "NCSI interface %s\n",
-                    nd->link_up ? "up" : "down");
+        netdev_dbg(nd->dev, "NCSI interface %s\n",
+                   nd->link_up ? "up" : "down");
 }
 
 static void ftgmac100_setup_clk(struct ftgmac100 *priv)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 5f4e1ffa7b95..ab02057ac730 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* Default alignment for start of data in an Rx FD */
 #define DPAA_FD_DATA_ALIGNMENT  16
 
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
 /* Values for the L3R field of the FM Parse Results
  */
 /* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
         if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
                 nr_frags = skb_shinfo(skb)->nr_frags;
-                dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-                                 sizeof(struct qm_sg_entry) * (1 + nr_frags),
+                dma_unmap_single(dev, addr,
+                                 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
                                  dma_dir);
 
                 /* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
         void *sgt_buf;
 
         /* get a page frag to store the SGTable */
-        sz = SKB_DATA_ALIGN(priv->tx_headroom +
-                            sizeof(struct qm_sg_entry) * (1 + nr_frags));
+        sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
         sgt_buf = netdev_alloc_frag(sz);
         if (unlikely(!sgt_buf)) {
                 netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
         skbh = (struct sk_buff **)buffer_start;
         *skbh = skb;
 
-        addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
-                              sizeof(struct qm_sg_entry) * (1 + nr_frags),
-                              dma_dir);
+        addr = dma_map_single(dev, buffer_start,
+                              priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
         if (unlikely(dma_mapping_error(dev, addr))) {
                 dev_err(dev, "DMA mapping failed");
                 err = -EINVAL;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ce6e24c74978..ecbf6187e13a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
 #define HWP_HXS_PHE_REPORT 0x00000800
 #define HWP_HXS_PCAC_PSTAT 0x00000100
 #define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
 struct fman_port_hwp_regs {
         struct {
                 u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
                 iowrite32be(0xffffffff, &regs->pmda[i].lcv);
         }
 
+        /* Short packet padding removal from checksum calculation */
+        iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+        iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
         start_port_hwp(port);
 }
 
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 8bcf470ff5f3..fb1a7251f45d 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_HISILICON
         bool "Hisilicon devices"
         default y
-        depends on (OF || ACPI) && HAS_DMA
+        depends on OF || ACPI
         depends on ARM || ARM64 || COMPILE_TEST
         ---help---
           If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index e2e5cdc7119c..4c0f7eda1166 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
         struct hinic_rq *rq = rxq->rq;
 
+        irq_set_affinity_hint(rq->irq, NULL);
         free_irq(rq->irq, rxq);
         rx_del_napi(rxq);
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 8ffb7454e67c..b151ae316546 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2103,9 +2103,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
         unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
         unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                                SKB_DATA_ALIGN(I40E_SKB_PAD +
-                                               (xdp->data_end -
-                                                xdp->data_hard_start));
+                                SKB_DATA_ALIGN(xdp->data_end -
+                                               xdp->data_hard_start);
 #endif
         struct sk_buff *skb;
 
@@ -2124,7 +2123,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                 return NULL;
 
         /* update pointers within the skb to store the data */
-        skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
+        skb_reserve(skb, xdp->data - xdp->data_hard_start);
         __skb_put(skb, xdp->data_end - xdp->data);
         if (metasize)
                 skb_metadata_set(skb, metasize);
@@ -2200,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
         return true;
 }
 
 #define I40E_XDP_PASS           0
-#define I40E_XDP_CONSUMED       1
-#define I40E_XDP_TX             2
+#define I40E_XDP_CONSUMED       BIT(0)
+#define I40E_XDP_TX             BIT(1)
+#define I40E_XDP_REDIR          BIT(2)
 
 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
                               struct i40e_ring *xdp_ring);
@@ -2249,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
                 break;
         case XDP_REDIRECT:
                 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-                result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+                result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
                 break;
         default:
                 bpf_warn_invalid_xdp_action(act);
@@ -2312,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
         struct sk_buff *skb = rx_ring->skb;
         u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-        bool failure = false, xdp_xmit = false;
+        unsigned int xdp_xmit = 0;
+        bool failure = false;
         struct xdp_buff xdp;
 
         xdp.rxq = &rx_ring->xdp_rxq;
@@ -2373,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
         }
 
         if (IS_ERR(skb)) {
-                if (PTR_ERR(skb) == -I40E_XDP_TX) {
-                        xdp_xmit = true;
+                unsigned int xdp_res = -PTR_ERR(skb);
+
+                if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+                        xdp_xmit |= xdp_res;
                         i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
                 } else {
                         rx_buffer->pagecnt_bias++;
@@ -2428,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                 total_rx_packets++;
         }
 
-        if (xdp_xmit) {
+        if (xdp_xmit & I40E_XDP_REDIR)
+                xdp_do_flush_map();
+
+        if (xdp_xmit & I40E_XDP_TX) {
                 struct i40e_ring *xdp_ring =
                         rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
                 i40e_xdp_ring_update_tail(xdp_ring);
-                xdp_do_flush_map();
         }
 
         rx_ring->skb = skb;
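The i40e hunks (and the matching ixgbe ones that follow) turn the per-packet XDP verdict from an enum into a bitmask so the end of the poll loop can tell "flush redirect maps" apart from "ring the Tx doorbell", doing each at most once per budget. A userspace model of the accumulation, assuming the names from the patch with the I/O stubbed out:

    #include <stdio.h>

    #define XDP_PASS     0
    #define XDP_CONSUMED (1u << 0)
    #define XDP_TX       (1u << 1)
    #define XDP_REDIR    (1u << 2)

    int main(void)
    {
            unsigned int verdicts[] = { XDP_TX, XDP_PASS, XDP_REDIR, XDP_TX };
            unsigned int xdp_xmit = 0;
            size_t i;

            for (i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
                    if (verdicts[i] & (XDP_TX | XDP_REDIR))
                            xdp_xmit |= verdicts[i];    /* accumulate, don't overwrite */

            if (xdp_xmit & XDP_REDIR)
                    printf("xdp_do_flush_map()\n");      /* flush redirect maps once */
            if (xdp_xmit & XDP_TX)
                    printf("update XDP Tx ring tail\n"); /* one doorbell for the batch */
            return 0;
    }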
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e87dbbc9024..62e57b05a0ae 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
         return skb;
 }
 
 #define IXGBE_XDP_PASS          0
-#define IXGBE_XDP_CONSUMED      1
-#define IXGBE_XDP_TX            2
+#define IXGBE_XDP_CONSUMED      BIT(0)
+#define IXGBE_XDP_TX            BIT(1)
+#define IXGBE_XDP_REDIR         BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
                                struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
         case XDP_REDIRECT:
                 err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
                 if (!err)
-                        result = IXGBE_XDP_TX;
+                        result = IXGBE_XDP_REDIR;
                 else
                         result = IXGBE_XDP_CONSUMED;
                 break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
         unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
         u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-        bool xdp_xmit = false;
+        unsigned int xdp_xmit = 0;
         struct xdp_buff xdp;
 
         xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
         }
 
         if (IS_ERR(skb)) {
-                if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-                        xdp_xmit = true;
+                unsigned int xdp_res = -PTR_ERR(skb);
+
+                if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+                        xdp_xmit |= xdp_res;
                         ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
                 } else {
                         rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 total_rx_packets++;
         }
 
-        if (xdp_xmit) {
+        if (xdp_xmit & IXGBE_XDP_REDIR)
+                xdp_do_flush_map();
+
+        if (xdp_xmit & IXGBE_XDP_TX) {
                 struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
                 /* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                  */
                 wmb();
                 writel(ring->next_to_use, ring->tail);
-
-                xdp_do_flush_map();
         }
 
         u64_stats_update_begin(&rx_ring->syncp);
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index cc2f7701e71e..f33fd22b351c 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -18,8 +18,8 @@ if NET_VENDOR_MARVELL
 
 config MV643XX_ETH
         tristate "Marvell Discovery (643XX) and Orion ethernet support"
-        depends on (MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST) && INET
-        depends on HAS_DMA
+        depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+        depends on INET
         select PHYLIB
         select MVMDIO
         ---help---
@@ -58,7 +58,6 @@ config MVNETA_BM_ENABLE
 config MVNETA
         tristate "Marvell Armada 370/38x/XP/37xx network interface support"
         depends on ARCH_MVEBU || COMPILE_TEST
-        depends on HAS_DMA
         select MVMDIO
         select PHYLINK
         ---help---
@@ -84,7 +83,6 @@ config MVNETA_BM
 config MVPP2
         tristate "Marvell Armada 375/7K/8K network interface support"
         depends on ARCH_MVEBU || COMPILE_TEST
-        depends on HAS_DMA
         select MVMDIO
         select PHYLINK
         ---help---
@@ -93,7 +91,7 @@ config MVPP2
 
 config PXA168_ETH
         tristate "Marvell pxa168 ethernet support"
-        depends on HAS_IOMEM && HAS_DMA
+        depends on HAS_IOMEM
         depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
         select PHYLIB
         ---help---
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 17a904cc6a5e..0ad2f3f7da85 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1932,7 +1932,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
                 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                 index = rx_desc - rxq->descs;
                 data = rxq->buf_virt_addr[index];
-                phys_addr = rx_desc->buf_phys_addr;
+                phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
 
                 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                     (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 487388aed98f..384c1fa49081 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
         unsigned long flags;
         bool poll_cmd = ent->polling;
         int alloc_ret;
+        int cmd_mode;
 
         sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
         down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
         set_signature(ent, !cmd->checksum_disabled);
         dump_command(dev, ent, 1);
         ent->ts1 = ktime_get_ns();
+        cmd_mode = cmd->mode;
 
         if (ent->callback)
                 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
         iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
         mmiowb();
         /* if not in polling don't use ent after this point */
-        if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+        if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
                 poll_timeout(ent);
                 /* make sure we read the descriptor after ownership is SW */
                 rmb();
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
         struct mlx5_core_dev *dev = filp->private_data;
         struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-        char outlen_str[8];
+        char outlen_str[8] = {0};
         int outlen;
         void *ptr;
         int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
         if (copy_from_user(outlen_str, buf, count))
                 return -EFAULT;
 
-        outlen_str[7] = 0;
-
         err = sscanf(outlen_str, "%d", &outlen);
         if (err < 0)
                 return err;
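The outlen_str change swaps a post-copy NUL patch for a zero-initialized buffer, which guarantees termination for any copy shorter than the array before sscanf() parses it. A userspace demonstration of the same idea:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char user_input[] = "128";      /* stands in for the debugfs write */
            char outlen_str[8] = {0};       /* all-zero: always NUL terminated */
            int outlen;

            /* stands in for copy_from_user(outlen_str, buf, count) */
            memcpy(outlen_str, user_input, sizeof(user_input) - 1);

            if (sscanf(outlen_str, "%d", &outlen) != 1)
                    return 1;
            printf("outlen = %d\n", outlen);
            return 0;
    }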
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 56c1b6f5593e..dae4156a710d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
         mlx5e_activate_channels(&priv->channels);
         netif_tx_start_all_queues(priv->netdev);
 
-        if (MLX5_VPORT_MANAGER(priv->mdev))
+        if (MLX5_ESWITCH_MANAGER(priv->mdev))
                 mlx5e_add_sqs_fwd_rules(priv);
 
         mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
         mlx5e_redirect_rqts_to_drop(priv);
 
-        if (MLX5_VPORT_MANAGER(priv->mdev))
+        if (MLX5_ESWITCH_MANAGER(priv->mdev))
                 mlx5e_remove_sqs_fwd_rules(priv);
 
         /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
         mlx5e_set_netdev_dev_addr(netdev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-        if (MLX5_VPORT_MANAGER(mdev))
+        if (MLX5_ESWITCH_MANAGER(mdev))
                 netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
         mlx5e_enable_async_events(priv);
 
-        if (MLX5_VPORT_MANAGER(priv->mdev))
+        if (MLX5_ESWITCH_MANAGER(priv->mdev))
                 mlx5e_register_vport_reps(priv);
 
         if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
         queue_work(priv->wq, &priv->set_rx_mode_work);
 
-        if (MLX5_VPORT_MANAGER(priv->mdev))
+        if (MLX5_ESWITCH_MANAGER(priv->mdev))
                 mlx5e_unregister_vport_reps(priv);
 
         mlx5e_disable_async_events(priv);
@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
                 return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-        if (MLX5_VPORT_MANAGER(mdev)) {
+        if (MLX5_ESWITCH_MANAGER(mdev)) {
                 rpriv = mlx5e_alloc_nic_rep_priv(mdev);
                 if (!rpriv) {
                         mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 57987f6546e8..2b8040a3cdbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
         struct mlx5e_rep_priv *rpriv = priv->ppriv;
         struct mlx5_eswitch_rep *rep;
 
-        if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                 return false;
 
         rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
 {
         struct mlx5e_rep_priv *rpriv = priv->ppriv;
-        struct mlx5_eswitch_rep *rep = rpriv->rep;
+        struct mlx5_eswitch_rep *rep;
 
+        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+                return false;
+
+        rep = rpriv->rep;
         if (rep && rep->vport != FDB_UPLINK_VPORT)
                 return true;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f63dfbcd29fe..b79d74860a30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 }
 
 /* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+
 
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
         int err;
         int i, enabled_events;
 
-        if (!ESW_ALLOWED(esw))
-                return 0;
-
-        if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+        if (!ESW_ALLOWED(esw) ||
             !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
                 return -EOPNOTSUPP;
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
         u64 node_guid;
         int err = 0;
 
-        if (!ESW_ALLOWED(esw))
+        if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                 return -EPERM;
         if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
                 return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
         struct mlx5_vport *evport;
 
-        if (!ESW_ALLOWED(esw))
+        if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                 return -EPERM;
         if (!LEGAL_VPORT(esw, vport))
                 return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index cecd201f0b73..91f1209886ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
         if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                 return -EOPNOTSUPP;
 
-        if (!MLX5_CAP_GEN(dev, vport_group_manager))
-                return -EOPNOTSUPP;
+        if(!MLX5_ESWITCH_MANAGER(dev))
+                return -EPERM;
 
         if (dev->priv.eswitch->mode == SRIOV_NONE)
                 return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 49a75d31185e..f1a86cea86a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
 #include "fs_core.h"
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                 goto err;
         }
 
-        if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+        if (MLX5_ESWITCH_MANAGER(dev)) {
                 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
                         err = init_fdb_root_ns(steering);
                         if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index afd9f4fa22f4..41ad24f0de2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -32,6 +32,7 @@
 
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
 #include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
         }
 
         if (MLX5_CAP_GEN(dev, vport_group_manager) &&
-            MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+            MLX5_ESWITCH_MANAGER(dev)) {
                 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
                 if (err)
                         return err;
         }
 
-        if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+        if (MLX5_ESWITCH_MANAGER(dev)) {
                 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
                 if (err)
                         return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 7cb67122e8b5..98359559c77e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
 #include "mlx5_core.h"
 #include "lib/mpfs.h"
 
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
         int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
         struct mlx5_mpfs *mpfs;
 
-        if (!MLX5_VPORT_MANAGER(dev))
+        if (!MLX5_ESWITCH_MANAGER(dev))
                 return 0;
 
         mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
         struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-        if (!MLX5_VPORT_MANAGER(dev))
+        if (!MLX5_ESWITCH_MANAGER(dev))
                 return;
 
         WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
         u32 index;
         int err;
 
-        if (!MLX5_VPORT_MANAGER(dev))
+        if (!MLX5_ESWITCH_MANAGER(dev))
                 return 0;
 
         mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
         int err = 0;
         u32 index;
 
-        if (!MLX5_VPORT_MANAGER(dev))
+        if (!MLX5_ESWITCH_MANAGER(dev))
                 return 0;
 
         mutex_lock(&mpfs->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index fa9d0760dd36..31a9cbd85689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
 static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
                                    int inlen)
 {
-        u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+        u32 out[MLX5_ST_SZ_DW(qetc_reg)];
 
         if (!MLX5_CAP_GEN(mdev, ets))
                 return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
                                      int outlen)
 {
-        u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+        u32 in[MLX5_ST_SZ_DW(qetc_reg)];
 
         if (!MLX5_CAP_GEN(mdev, ets))
                 return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2a8b529ce6dd..a0674962f02c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                 return -EBUSY;
         }
 
+        if (!MLX5_ESWITCH_MANAGER(dev))
+                goto enable_vfs_hca;
+
         err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
         if (err) {
                 mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                 return err;
         }
 
+enable_vfs_hca:
         for (vf = 0; vf < num_vfs; vf++) {
                 err = mlx5_core_enable_hca(dev, vf + 1);
                 if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
         }
 
 out:
-        mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+        if (MLX5_ESWITCH_MANAGER(dev))
+                mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 
         if (mlx5_wait_for_vf_pages(dev))
                 mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 719cecb182c6..7eecd5b07bb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                 return -EINVAL;
         if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                 return -EACCES;
-        if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-                return -EOPNOTSUPP;
 
         in = kvzalloc(inlen, GFP_KERNEL);
         if (!in)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index f4d9c9975ac3..82827a8d3d67 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -30,7 +30,7 @@ config MLXSW_CORE_THERMAL
 
 config MLXSW_PCI
         tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
-        depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+        depends on PCI && HAS_IOMEM && MLXSW_CORE
         default m
         ---help---
           This is PCI bus implementation for Mellanox Technologies Switch ASICs.
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index fb2c8f8071e6..776a8a9be8e3 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -344,10 +344,9 @@ static int ocelot_port_stop(struct net_device *dev)
 static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
 {
         ifh[0] = IFH_INJ_BYPASS;
-        ifh[1] = (0xff00 & info->port) >> 8;
+        ifh[1] = (0xf00 & info->port) >> 8;
         ifh[2] = (0xff & info->port) << 24;
-        ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) |
-                 (info->tag_type << 16) | info->vid;
+        ifh[3] = (info->tag_type << 16) | info->vid;
 
         return 0;
 }
@@ -370,11 +369,13 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
                          QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
 
         info.port = BIT(port->chip_port);
-        info.cpuq = 0xff;
+        info.tag_type = IFH_TAG_TYPE_C;
+        info.vid = skb_vlan_tag_get(skb);
         ocelot_gen_ifh(ifh, &info);
 
         for (i = 0; i < IFH_LEN; i++)
-                ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+                ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
+                                 QS_INJ_WR, grp);
 
         count = (skb->len + 3) / 4;
         last = skb->len % 4;
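The injection write now goes through cpu_to_be32() because the IFH is defined big-endian, so on a little-endian CPU each 32-bit word must be byte-swapped before it hits the injection FIFO; the (__force u32) cast only silences sparse. A minimal kernel-style sketch of the pattern, assuming a 4-word header and a plain writel()-style register window:

    #include <linux/io.h>
    #include <asm/byteorder.h>

    #define IFH_LEN 4

    static void inject_ifh_sketch(void __iomem *inj_wr, const u32 *ifh)
    {
            int i;

            /* swap each word to big-endian before writing it out */
            for (i = 0; i < IFH_LEN; i++)
                    writel((__force u32)cpu_to_be32(ifh[i]), inj_wr);
    }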
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index fcdfb8e7fdea..40216d56dddc 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 
         ret = nfp_net_bpf_offload(nn, prog, running, extack);
         /* Stop offload if replace not possible */
-        if (ret && prog)
-                nfp_bpf_xdp_offload(app, nn, NULL, extack);
+        if (ret)
+                return ret;
 
-        nn->dp.bpf_offload_xdp = prog && !ret;
+        nn->dp.bpf_offload_xdp = !!prog;
         return ret;
 }
 
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                 return -EOPNOTSUPP;
 
+        if (tcf_block_shared(f->block))
+                return -EOPNOTSUPP;
+
         switch (f->command) {
         case TC_BLOCK_BIND:
                 return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 91935405f586..84f7a5dbea9d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                             NFP_FLOWER_MASK_MPLS_Q;
 
                 frame->mpls_lse = cpu_to_be32(t_mpls);
+        } else if (dissector_uses_key(flow->dissector,
+                                      FLOW_DISSECTOR_KEY_BASIC)) {
+                /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
+                 * bit, which indicates an mpls ether type but without any
+                 * mpls fields.
+                 */
+                struct flow_dissector_key_basic *key_basic;
+
+                key_basic = skb_flow_dissector_target(flow->dissector,
+                                                      FLOW_DISSECTOR_KEY_BASIC,
+                                                      flow->key);
+                if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+                    key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+                        frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
         }
 }
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index c42e64f32333..525057bee0ed 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
         case cpu_to_be16(ETH_P_ARP):
                 return -EOPNOTSUPP;
 
+        case cpu_to_be16(ETH_P_MPLS_UC):
+        case cpu_to_be16(ETH_P_MPLS_MC):
+                if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+                        key_layer |= NFP_FLOWER_LAYER_MAC;
+                        key_size += sizeof(struct nfp_flower_mac_mpls);
+                }
+                break;
+
         /* Will be included in layer 2. */
         case cpu_to_be16(ETH_P_8021Q):
                 break;
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                 return -EOPNOTSUPP;
 
+        if (tcf_block_shared(f->block))
+                return -EOPNOTSUPP;
+
         switch (f->command) {
         case TC_BLOCK_BIND:
                 return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 46b76d5a726c..152283d7e59c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -240,7 +240,6 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
                 return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
 
         pf->limit_vfs = ~0;
-        pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */
         /* Allow any setting for backwards compatibility if symbol not found */
         if (err == -ENOENT)
                 return 0;
@@ -668,7 +667,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 
         err = nfp_net_pci_probe(pf);
         if (err)
-                goto err_sriov_unlimit;
+                goto err_fw_unload;
 
         err = nfp_hwmon_register(pf);
         if (err) {
@@ -680,8 +679,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 
 err_net_remove:
         nfp_net_pci_remove(pf);
-err_sriov_unlimit:
-        pci_sriov_set_totalvfs(pf->pdev, 0);
 err_fw_unload:
         kfree(pf->rtbl);
         nfp_mip_close(pf->mip);
@@ -715,7 +712,6 @@ static void nfp_pci_remove(struct pci_dev *pdev)
         nfp_hwmon_unregister(pf);
 
         nfp_pcie_sriov_disable(pdev);
-        pci_sriov_set_totalvfs(pf->pdev, 0);
 
         nfp_net_pci_remove(pf);
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index cd34097b79f1..37a6d7822a38 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
 	err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
 			   nfp_resource_address(state->res),
 			   fwinf, sizeof(*fwinf));
-	if (err < sizeof(*fwinf))
+	if (err < (int)sizeof(*fwinf))
 		goto err_release;
 
 	if (!nffw_res_flg_init_get(fwinf))
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 8f31406ec894..e0680ce91328 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
 		*type = DCBX_PROTOCOL_ROCE_V2;
 	} else {
 		*type = DCBX_MAX_PROTOCOL_TYPE;
-		DP_ERR(p_hwfn,
-		       "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
-		       id, app_prio_bitmap);
+		DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
+		       app_prio_bitmap);
 		return false;
 	}
 
@@ -710,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
 	p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
 
 	memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
-	       ARRAY_SIZE(p_local->local_chassis_id));
+	       sizeof(p_local->local_chassis_id));
 	memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
-	       ARRAY_SIZE(p_local->local_port_id));
+	       sizeof(p_local->local_port_id));
 }
 
 static void
@@ -724,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
 	p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
 
 	memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
-	       ARRAY_SIZE(p_remote->peer_chassis_id));
+	       sizeof(p_remote->peer_chassis_id));
 	memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
-	       ARRAY_SIZE(p_remote->peer_port_id));
+	       sizeof(p_remote->peer_port_id));
 }
 
 static int
@@ -1479,8 +1478,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
 		*cap = 0x80;
 		break;
 	case DCB_CAP_ATTR_DCBX:
-		*cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
-			DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
+		*cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
+			DCB_CAP_DCBX_STATIC);
 		break;
 	default:
 		*cap = false;
@@ -1548,8 +1547,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
 	if (!dcbx_info)
 		return 0;
 
-	if (dcbx_info->operational.enabled)
-		mode |= DCB_CAP_DCBX_LLD_MANAGED;
 	if (dcbx_info->operational.ieee)
 		mode |= DCB_CAP_DCBX_VER_IEEE;
 	if (dcbx_info->operational.cee)
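The ARRAY_SIZE() to sizeof() change above fixes a classic length bug: memcpy() takes a byte count, while ARRAY_SIZE() yields an element count, and the two diverge whenever the element is wider than one byte. A small userspace demonstration, with ARRAY_SIZE() defined the way the kernel defines it:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	uint32_t src[2] = { 0xdeadbeef, 0xcafebabe };
	uint32_t dst[2] = { 0, 0 };

	/* ARRAY_SIZE() is an element count: copies only 2 bytes here. */
	memcpy(dst, src, ARRAY_SIZE(src));
	printf("with ARRAY_SIZE: dst[1]=0x%x (truncated)\n", (unsigned)dst[1]);

	/* sizeof is a byte count: copies the whole array. */
	memcpy(dst, src, sizeof(src));
	printf("with sizeof:     dst[1]=0x%x\n", (unsigned)dst[1]);
	return 0;
}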
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 329781cda77f..e5249b4741d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 			DP_INFO(p_hwfn, "Failed to update driver state\n");
 
 		rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
-					       QED_OV_ESWITCH_VEB);
+					       QED_OV_ESWITCH_NONE);
 		if (rc)
 			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
 	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c97ebd681c47..012973d75ad0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
 
 	skb = build_skb(buffer->data, 0);
 	if (!skb) {
-		rc = -ENOMEM;
-		goto out_post;
+		DP_INFO(cdev, "Failed to build SKB\n");
+		kfree(buffer->data);
+		goto out_post1;
 	}
 
 	data->u.placement_offset += NET_SKB_PAD;
@@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
 		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
 				      data->opaque_data_0,
 				      data->opaque_data_1);
+	} else {
+		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
+				    QED_MSG_LL2 | QED_MSG_STORAGE),
+			   "Dropping the packet\n");
+		kfree(buffer->data);
 	}
 
+out_post1:
 	/* Update Buffer information and update FW producer */
 	buffer->data = new_data;
 	buffer->phys_addr = new_phys_addr;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index b04d57ca5176..0cbc74d6ca8b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -567,8 +567,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
 		/* Fastpath interrupts */
 		for (j = 0; j < 64; j++) {
 			if ((0x2ULL << j) & status) {
-				hwfn->simd_proto_handler[j].func(
-					hwfn->simd_proto_handler[j].token);
+				struct qed_simd_fp_handler *p_handler =
+					&hwfn->simd_proto_handler[j];
+
+				if (p_handler->func)
+					p_handler->func(p_handler->token);
+				else
+					DP_NOTICE(hwfn,
+						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+						  j, status);
+
 				status &= ~(0x2ULL << j);
 				rc = IRQ_HANDLED;
 			}
@@ -781,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+	if (is_kdump_kernel()) {
+		DP_INFO(cdev,
+			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+			cdev->int_params.in.min_msix_cnt);
+		cdev->int_params.in.num_vectors =
+			cdev->int_params.in.min_msix_cnt;
+	}
+
 	rc = qed_set_int_mode(cdev, false);
 	if (rc) {
 		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
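The qed_single_int() hunk above stops the IRQ handler from jumping through a fastpath function pointer that was never installed. A minimal sketch of guarded function-pointer dispatch (struct and names are illustrative):

#include <stdio.h>

struct fp_handler {
	void (*func)(void *token);
	void *token;
};

static void dispatch(struct fp_handler *h, int idx)
{
	/* Calling through a NULL pointer is undefined behaviour;
	 * log and skip instead. */
	if (h->func)
		h->func(h->token);
	else
		fprintf(stderr, "no handler installed for slot %d\n", idx);
}

static void rx_handler(void *token)
{
	printf("handling %s\n", (const char *)token);
}

int main(void)
{
	struct fp_handler handlers[2] = {
		{ .func = rx_handler, .token = "rx" },
		{ 0 },                  /* slot left unregistered */
	};

	dispatch(&handlers[0], 0);
	dispatch(&handlers[1], 1);
	return 0;
}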
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f01bf52bc381..fd59cf45f4be 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
 static int qed_sriov_enable(struct qed_dev *cdev, int num)
 {
 	struct qed_iov_vf_init_params params;
+	struct qed_hwfn *hwfn;
+	struct qed_ptt *ptt;
 	int i, j, rc;
 
 	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 
 	/* Initialize HW for VF access */
 	for_each_hwfn(cdev, j) {
-		struct qed_hwfn *hwfn = &cdev->hwfns[j];
-		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+		hwfn = &cdev->hwfns[j];
+		ptt = qed_ptt_acquire(hwfn);
 
 		/* Make sure not to use more than 16 queues per VF */
 		params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 		goto err;
 	}
 
+	hwfn = QED_LEADING_HWFN(cdev);
+	ptt = qed_ptt_acquire(hwfn);
+	if (!ptt) {
+		DP_ERR(hwfn, "Failed to acquire ptt\n");
+		rc = -EBUSY;
+		goto err;
+	}
+
+	rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+	if (rc)
+		DP_INFO(cdev, "Failed to update eswitch mode\n");
+	qed_ptt_release(hwfn, ptt);
+
 	return num;
 
 err:
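The qed_sriov_enable() hunk above moves the eswitch-mode update behind VF setup and wraps it in the usual acquire/use/release discipline for the PTT window, with a distinct error path when acquisition fails. A generic userspace sketch of that discipline (the "window" resource is a stand-in, not the driver's type):

#include <stdio.h>
#include <stdlib.h>

struct window { int id; };

static struct window *window_acquire(void)
{
	return malloc(sizeof(struct window)); /* may fail, like qed_ptt_acquire() */
}

static void window_release(struct window *w)
{
	free(w);
}

static int update_mode(struct window *w, int mode)
{
	(void)w;
	printf("mode set to %d\n", mode);
	return 0;
}

int main(void)
{
	struct window *w = window_acquire();
	int rc;

	if (!w)
		return 1;       /* same shape as the driver's -EBUSY path */

	rc = update_mode(w, 1);
	if (rc)
		fprintf(stderr, "update failed, continuing teardown\n");
	window_release(w);      /* always released, even after failure */
	return rc ? 1 : 0;
}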
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 02adb513f475..013ff567283c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
 {
 	struct qede_ptp *ptp = edev->ptp;
 
-	if (!ptp)
-		return -EIO;
+	if (!ptp) {
+		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+					SOF_TIMESTAMPING_RX_SOFTWARE |
+					SOF_TIMESTAMPING_SOFTWARE;
+		info->phc_index = -1;
+
+		return 0;
+	}
 
 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
 				SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 75dfac0248f4..f4cae2be0fda 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7148,7 +7148,7 @@ static void rtl8169_netpoll(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev);
+	rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
 }
 #endif
 
7154 7154
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 27be51f0a421..f3f7477043ce 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -17,7 +17,6 @@ if NET_VENDOR_RENESAS
 
 config SH_ETH
 	tristate "Renesas SuperH Ethernet support"
-	depends on HAS_DMA
 	depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
 	select CRC32
 	select MII
@@ -31,7 +30,6 @@ config SH_ETH
 
 config RAVB
 	tristate "Renesas Ethernet AVB support"
-	depends on HAS_DMA
 	depends on ARCH_RENESAS || COMPILE_TEST
 	select CRC32
 	select MII
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index ad4a354ce570..570ec72266f3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3180,6 +3180,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
 	return true;
 }
 
+static
 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
 				       const struct efx_filter_spec *spec)
 {
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 8edf20967c82..e045a5d6b938 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
 	if (!state)
 		return -ENOMEM;
 	efx->filter_state = state;
+	init_rwsem(&state->lock);
 
 	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
 	table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index cb5b0f58c395..edf20361ea5f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP
 config DWMAC_SOCFPGA
 	tristate "SOCFPGA dwmac support"
 	default ARCH_SOCFPGA
-	depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
+	depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
 	select MFD_SYSCON
 	help
 	  Support for ethernet controller on Altera SOCFPGA
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 6e359572b9f0..5b3b06a0a3bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -55,6 +55,7 @@ struct socfpga_dwmac {
 	struct device *dev;
 	struct regmap *sys_mgr_base_addr;
 	struct reset_control *stmmac_rst;
+	struct reset_control *stmmac_ocp_rst;
 	void __iomem *splitter_base;
 	bool f2h_ptp_ref_clk;
 	struct tse_pcs pcs;
@@ -262,8 +263,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
 		val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
 
 	/* Assert reset to the enet controller before changing the phy mode */
-	if (dwmac->stmmac_rst)
-		reset_control_assert(dwmac->stmmac_rst);
+	reset_control_assert(dwmac->stmmac_ocp_rst);
+	reset_control_assert(dwmac->stmmac_rst);
 
 	regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
 	ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
@@ -288,8 +289,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
 	/* Deassert reset for the phy configuration to be sampled by
 	 * the enet controller, and operation to start in requested mode
 	 */
-	if (dwmac->stmmac_rst)
-		reset_control_deassert(dwmac->stmmac_rst);
+	reset_control_deassert(dwmac->stmmac_ocp_rst);
+	reset_control_deassert(dwmac->stmmac_rst);
 	if (phymode == PHY_INTERFACE_MODE_SGMII) {
 		if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
 			dev_err(dwmac->dev, "Unable to initialize TSE PCS");
@@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
 		goto err_remove_config_dt;
 	}
 
+	dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
+	if (IS_ERR(dwmac->stmmac_ocp_rst)) {
+		ret = PTR_ERR(dwmac->stmmac_ocp_rst);
+		dev_err(dev, "error getting reset control of ocp %d\n", ret);
+		goto err_remove_config_dt;
+	}
+
+	reset_control_deassert(dwmac->stmmac_ocp_rst);
+
 	ret = socfpga_dwmac_parse_data(dwmac, dev);
 	if (ret) {
 		dev_err(dev, "Unable to parse OF data\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d37f17ca62fe..65bc3556bd8f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
 	}
 }
 
+static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+	u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+	value &= ~DMA_RBSZ_MASK;
+	value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
 const struct stmmac_dma_ops dwmac4_dma_ops = {
 	.reset = dwmac4_dma_reset,
 	.init = dwmac4_dma_init,
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
 	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
 	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
 	.enable_tso = dwmac4_enable_tso,
+	.set_bfsize = dwmac4_set_bfsize,
};

const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
 	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
 	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
 	.enable_tso = dwmac4_enable_tso,
+	.set_bfsize = dwmac4_set_bfsize,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index c63c1fe3f26b..22a4a6dbb1a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -120,6 +120,8 @@
 
 /* DMA Rx Channel X Control register defines */
 #define DMA_CONTROL_SR			BIT(0)
+#define DMA_RBSZ_MASK			GENMASK(14, 1)
+#define DMA_RBSZ_SHIFT			1
 
 /* Interrupt status per channel */
 #define DMA_CHAN_STATUS_REB		GENMASK(21, 19)
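Together with dwmac4_set_bfsize() above, these defines implement a read-modify-write of a multi-bit register field: mask out the old field, then OR in the shifted new value clipped by the same mask. A userspace sketch over a plain 32-bit variable using the same GENMASK-style constants (the register access itself is simulated):

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

#define RBSZ_MASK  GENMASK(14, 1)
#define RBSZ_SHIFT 1

static uint32_t set_bfsize(uint32_t reg, int bfsize)
{
	reg &= ~RBSZ_MASK;                                   /* clear the field */
	reg |= ((uint32_t)bfsize << RBSZ_SHIFT) & RBSZ_MASK; /* write new value */
	return reg;
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	reg = set_bfsize(reg, 1536);
	printf("reg=0x%08x field=%u\n", (unsigned)reg,
	       (unsigned)((reg & RBSZ_MASK) >> RBSZ_SHIFT));
	return 0;
}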
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e44e7b26ce82..fe8b536b13f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
 	void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
 	void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
 	void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+	void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
};

#define stmmac_reset(__priv, __args...) \
@@ -235,6 +236,8 @@ struct stmmac_dma_ops {
 	stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
 #define stmmac_enable_tso(__priv, __args...) \
 	stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_set_dma_bfsize(__priv, __args...) \
+	stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
 
 struct mac_device_info;
 struct net_device;
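The new stmmac_set_dma_bfsize() wrapper follows the file's existing convention: each DMA op is invoked through a macro that expands to a guarded indirect call, so cores whose ops table lacks the hook are simply skipped. A compact sketch of that guarded-callback idea (the macro below is a simplified stand-in for stmmac_do_void_callback, not its actual definition):

#include <stdio.h>

struct dma_ops {
	void (*set_bfsize)(int bfsize, unsigned int chan);
};

/* Call the op only if this core's ops table provides it. */
#define do_void_callback(ops, op, ...)			\
	do {						\
		if ((ops)->op)				\
			(ops)->op(__VA_ARGS__);		\
	} while (0)

static void dwmac4_like_set_bfsize(int bfsize, unsigned int chan)
{
	printf("chan %u: buffer size %d\n", chan, bfsize);
}

int main(void)
{
	struct dma_ops with_op = { .set_bfsize = dwmac4_like_set_bfsize };
	struct dma_ops without_op = { 0 };

	do_void_callback(&with_op, set_bfsize, 1536, 0);
	do_void_callback(&without_op, set_bfsize, 1536, 0); /* silently a no-op */
	return 0;
}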
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e79b0d7b388a..60f59abab009 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -928,6 +928,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 static int stmmac_init_phy(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 tx_cnt = priv->plat->tx_queues_to_use;
 	struct phy_device *phydev;
 	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
 	char bus_id[MII_BUS_ID_SIZE];
@@ -969,6 +970,15 @@ static int stmmac_init_phy(struct net_device *dev)
 					 SUPPORTED_1000baseT_Full);
 
 	/*
+	 * Half-duplex mode not supported with multiqueue
+	 * half-duplex can only works with single queue
+	 */
+	if (tx_cnt > 1)
+		phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+				       SUPPORTED_100baseT_Half |
+				       SUPPORTED_10baseT_Half);
+
+	/*
 	 * Broken HW is sometimes missing the pull-up resistor on the
 	 * MDIO line, which results in reads to non-existent devices returning
 	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
@@ -1794,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 
 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
 				   rxfifosz, qmode);
+		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+				      chan);
 	}
 
 	for (chan = 0; chan < tx_channels_count; chan++) {
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 7a16d40a72d1..b9221fc1674d 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -60,8 +60,7 @@
 #include <linux/sungem_phy.h>
 #include "sungem.h"
 
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
 
 #define DEFAULT_MSG	(NETIF_MSG_DRV		| \
 			 NETIF_MSG_PROBE	| \
@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
 	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
 	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
 	writel(val, gp->regs + RXDMA_CFG);
 	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
 		writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
 	struct net_device *dev = gp->dev;
 	int entry, drops, work_done = 0;
 	u32 done;
-	__sum16 csum;
 
 	if (netif_msg_rx_status(gp))
 		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
 			skb = copy_skb;
 		}
 
-		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
-		skb->csum = csum_unfold(csum);
-		skb->ip_summed = CHECKSUM_COMPLETE;
+		if (likely(dev->features & NETIF_F_RXCSUM)) {
+			__sum16 csum;
+
+			csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+			skb->csum = csum_unfold(csum);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+		}
 		skb->protocol = eth_type_trans(skb, gp->dev);
 
 		napi_gro_receive(&gp->napi, skb);
@@ -1761,7 +1763,7 @@ static void gem_init_dma(struct gem *gp)
 	writel(0, gp->regs + TXDMA_KICK);
 
 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
 	writel(val, gp->regs + RXDMA_CFG);
 
 	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2985,8 +2987,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_drvdata(pdev, dev);
 
 	/* We can do scatter/gather and HW checksum */
-	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+	dev->features = dev->hw_features;
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
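The sungem hunks above make CHECKSUM_COMPLETE reporting conditional on NETIF_F_RXCSUM, which is now user-toggleable because it moved into hw_features. A sketch of the feature-gated receive-checksum decision (flag value and types are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

#define F_RXCSUM (1u << 0)   /* illustrative feature bit */

enum csum_state { CHECKSUM_NONE, CHECKSUM_COMPLETE };

struct pkt {
	enum csum_state ip_summed;
	uint16_t csum;
};

static void rx_finish(struct pkt *p, uint32_t features, uint16_t hw_csum)
{
	if (features & F_RXCSUM) {
		/* Trust the hardware sum only when the feature is on. */
		p->csum = hw_csum;
		p->ip_summed = CHECKSUM_COMPLETE;
	} else {
		p->ip_summed = CHECKSUM_NONE; /* stack verifies in software */
	}
}

int main(void)
{
	struct pkt p = { 0 };

	rx_finish(&p, F_RXCSUM, 0xabcd);
	printf("with RXCSUM: state=%d csum=0x%x\n", p.ip_summed, p.csum);
	rx_finish(&p, 0, 0xabcd);
	printf("without:     state=%d\n", p.ip_summed);
	return 0;
}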
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index cdbddf16dd29..4f1267477aa4 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -205,7 +205,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
  * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
  * abstract out these details
  */
-int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
 {
 	struct cpdma_params *cpdma_params = &ctlr->params;
 	struct cpdma_desc_pool *pool;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 06d7c9e4dcda..f270beebb428 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1385,6 +1385,15 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
 	return -EOPNOTSUPP;
 }
 
+static int match_first_device(struct device *dev, void *data)
+{
+	if (dev->parent && dev->parent->of_node)
+		return of_device_is_compatible(dev->parent->of_node,
+					       "ti,davinci_mdio");
+
+	return !strncmp(dev_name(dev), "davinci_mdio", 12);
+}
+
 /**
  * emac_dev_open - EMAC device open
  * @ndev: The DaVinci EMAC network adapter
@@ -1484,8 +1493,14 @@ static int emac_dev_open(struct net_device *ndev)
 
 	/* use the first phy on the bus if pdata did not give us a phy id */
 	if (!phydev && !priv->phy_id) {
-		phy = bus_find_device_by_name(&mdio_bus_type, NULL,
-					      "davinci_mdio");
+		/* NOTE: we can't use bus_find_device_by_name() here because
+		 * the device name is not guaranteed to be 'davinci_mdio'. On
+		 * some systems it can be 'davinci_mdio.0' so we need to use
+		 * strncmp() against the first part of the string to correctly
+		 * match it.
+		 */
+		phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+				      match_first_device);
 		if (phy) {
 			priv->phy_id = dev_name(phy);
 			if (!priv->phy_id || !*priv->phy_id)
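The davinci_emac hunk above replaces an exact-name lookup with bus_find_device() plus a predicate, since the MDIO device may be registered as "davinci_mdio.0" rather than exactly "davinci_mdio". A generic sketch of predicate-based first-match search (the bus is modeled as a simple name array):

#include <stdio.h>
#include <string.h>

/* Return nonzero if dev_name matches, mirroring a bus match callback. */
static int match_first_device(const char *dev_name, void *data)
{
	(void)data;
	return !strncmp(dev_name, "davinci_mdio", 12);
}

static const char *find_device(const char *names[], int n,
			       int (*match)(const char *, void *), void *data)
{
	for (int i = 0; i < n; i++)
		if (match(names[i], data))
			return names[i];
	return NULL;
}

int main(void)
{
	const char *bus[] = { "eth0", "davinci_mdio.0", "spi1" };
	const char *hit = find_device(bus, 3, match_first_device, NULL);

	/* An exact-name lookup would have missed "davinci_mdio.0". */
	printf("found: %s\n", hit ? hit : "(none)");
	return 0;
}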
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 750eaa53bf0c..ada33c2d9ac2 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
 out_unlock:
 	rcu_read_unlock();
 out:
-	NAPI_GRO_CB(skb)->flush |= flush;
+	skb_gro_flush_final(skb, pp, flush);
 
 	return pp;
 }
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f347fd9c5b28..777fa59f5e0c 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -89,10 +89,6 @@
 static const char banner[] __initconst = KERN_INFO \
 	"AX.25: bpqether driver version 004\n";
 
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
 static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
 static int bpq_device_event(struct notifier_block *, unsigned long, void *);
 
@@ -501,8 +497,8 @@ static int bpq_new_device(struct net_device *edev)
 	bpq->ethdev = edev;
 	bpq->axdev = ndev;
 
-	memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
-	memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+	eth_broadcast_addr(bpq->dest_addr);
+	eth_broadcast_addr(bpq->acpt_addr);
 
 	err = register_netdevice(ndev);
 	if (err)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 1a924b867b07..4b6e308199d2 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-void rndis_set_subchannel(struct work_struct *w);
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5d5bd513847f..8e9d0ee1572b 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
 			       VM_PKT_DATA_INBAND, 0);
 }
 
+/* Worker to setup sub channels on initial setup
+ * Initial hotplug event occurs in softirq context
+ * and can't wait for channels.
+ */
+static void netvsc_subchan_work(struct work_struct *w)
+{
+	struct netvsc_device *nvdev =
+		container_of(w, struct netvsc_device, subchan_work);
+	struct rndis_device *rdev;
+	int i, ret;
+
+	/* Avoid deadlock with device removal already under RTNL */
+	if (!rtnl_trylock()) {
+		schedule_work(w);
+		return;
+	}
+
+	rdev = nvdev->extension;
+	if (rdev) {
+		ret = rndis_set_subchannel(rdev->ndev, nvdev);
+		if (ret == 0) {
+			netif_device_attach(rdev->ndev);
+		} else {
+			/* fallback to only primary channel */
+			for (i = 1; i < nvdev->num_chn; i++)
+				netif_napi_del(&nvdev->chan_table[i].napi);
+
+			nvdev->max_chn = 1;
+			nvdev->num_chn = 1;
+		}
+	}
+
+	rtnl_unlock();
+}
+
 static struct netvsc_device *alloc_net_device(void)
 {
 	struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
 
 	init_completion(&net_device->channel_init_wait);
 	init_waitqueue_head(&net_device->subchan_open);
-	INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
+	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
 
 	return net_device;
 }
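netvsc_subchan_work() above never blocks on RTNL from the workqueue: if the lock is contended it reschedules itself and returns, which avoids deadlocking against a removal path that already holds RTNL and flushes the work. A pthread sketch of the trylock-or-requeue idea (the requeue is modeled as a bounded retry loop):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if the work ran, 0 if it asked to be requeued. */
static int subchan_work(void)
{
	if (pthread_mutex_trylock(&big_lock) != 0) {
		/* Lock held elsewhere (possibly by a path waiting on us):
		 * back off instead of blocking, and run again later. */
		return 0;
	}

	printf("configuring sub-channels under the lock\n");
	pthread_mutex_unlock(&big_lock);
	return 1;
}

int main(void)
{
	/* Simulate a requeue loop; a real workqueue would re-run the item. */
	for (int tries = 0; tries < 5; tries++)
		if (subchan_work())
			return 0;
	return 1;
}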
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index fe2256bf1d13..dd1d6e115145 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
 	if (IS_ERR(nvdev))
 		return PTR_ERR(nvdev);
 
-	/* Note: enable and attach happen when sub-channels setup */
+	if (nvdev->num_chn > 1) {
+		ret = rndis_set_subchannel(ndev, nvdev);
+
+		/* if unavailable, just proceed with one queue */
+		if (ret) {
+			nvdev->max_chn = 1;
+			nvdev->num_chn = 1;
+		}
+	}
+
+	/* In any case device is now ready */
+	netif_device_attach(ndev);
 
+	/* Note: enable and attach happen when sub-channels setup */
 	netif_carrier_off(ndev);
 
 	if (netif_running(ndev)) {
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev,
 
 	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+	if (nvdev->num_chn > 1)
+		schedule_work(&nvdev->subchan_work);
+
 	/* hw_features computed in rndis_netdev_set_hwcaps() */
 	net->features = net->hw_features |
 		NETIF_F_HIGHDMA | NETIF_F_SG |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 5428bb261102..9b4e3c3787e5 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
  * This breaks overlap of processing the host message for the
  * new primary channel with the initialization of sub-channels.
  */
-void rndis_set_subchannel(struct work_struct *w)
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
 {
-	struct netvsc_device *nvdev
-		= container_of(w, struct netvsc_device, subchan_work);
 	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
-	struct net_device_context *ndev_ctx;
-	struct rndis_device *rdev;
-	struct net_device *ndev;
-	struct hv_device *hv_dev;
+	struct net_device_context *ndev_ctx = netdev_priv(ndev);
+	struct hv_device *hv_dev = ndev_ctx->device_ctx;
+	struct rndis_device *rdev = nvdev->extension;
 	int i, ret;
 
-	if (!rtnl_trylock()) {
-		schedule_work(w);
-		return;
-	}
-
-	rdev = nvdev->extension;
-	if (!rdev)
-		goto unlock; /* device was removed */
-
-	ndev = rdev->ndev;
-	ndev_ctx = netdev_priv(ndev);
-	hv_dev = ndev_ctx->device_ctx;
+	ASSERT_RTNL();
 
 	memset(init_packet, 0, sizeof(struct nvsp_message));
 	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	if (ret) {
 		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
-		goto failed;
+		return ret;
 	}
 
 	wait_for_completion(&nvdev->channel_init_wait);
 	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
 		netdev_err(ndev, "sub channel request failed\n");
-		goto failed;
+		return -EIO;
 	}
 
 	nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
 	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
 		ndev_ctx->tx_table[i] = i % nvdev->num_chn;
 
-	netif_device_attach(ndev);
-	rtnl_unlock();
-	return;
-
-failed:
-	/* fallback to only primary channel */
-	for (i = 1; i < nvdev->num_chn; i++)
-		netif_napi_del(&nvdev->chan_table[i].napi);
-
-	nvdev->max_chn = 1;
-	nvdev->num_chn = 1;
-
-	netif_device_attach(ndev);
-unlock:
-	rtnl_unlock();
+	return 0;
 }
 
 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
@@ -1360,21 +1332,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
 		netif_napi_add(net, &net_device->chan_table[i].napi,
 			       netvsc_poll, NAPI_POLL_WEIGHT);
 
-	if (net_device->num_chn > 1)
-		schedule_work(&net_device->subchan_work);
+	return net_device;
 
 out:
-	/* if unavailable, just proceed with one queue */
-	if (ret) {
-		net_device->max_chn = 1;
-		net_device->num_chn = 1;
-	}
-
-	/* No sub channels, device is ready */
-	if (net_device->num_chn == 1)
-		netif_device_attach(net);
-
-	return net_device;
+	/* setting up multiple channels failed */
+	net_device->max_chn = 1;
+	net_device->num_chn = 1;
 
 err_dev_remv:
 	rndis_filter_device_remove(dev, net_device);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4377c26f714d..4a949569ec4c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
 {
 	struct ipvl_dev *ipvlan;
 	struct net_device *mdev = port->dev;
-	int err = 0;
+	unsigned int flags;
+	int err;
 
 	ASSERT_RTNL();
 	if (port->mode != nval) {
+		list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+			flags = ipvlan->dev->flags;
+			if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
+				err = dev_change_flags(ipvlan->dev,
+						       flags | IFF_NOARP);
+			} else {
+				err = dev_change_flags(ipvlan->dev,
+						       flags & ~IFF_NOARP);
+			}
+			if (unlikely(err))
+				goto fail;
+		}
 		if (nval == IPVLAN_MODE_L3S) {
 			/* New mode is L3S */
 			err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
 			mdev->l3mdev_ops = &ipvl_l3mdev_ops;
 			mdev->priv_flags |= IFF_L3MDEV_MASTER;
 		} else
-			return err;
+			goto fail;
 	} else if (port->mode == IPVLAN_MODE_L3S) {
 		/* Old mode was L3S */
 		mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
 		ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
 		mdev->l3mdev_ops = NULL;
 	}
-	list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-		if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
-			ipvlan->dev->flags |= IFF_NOARP;
-		else
-			ipvlan->dev->flags &= ~IFF_NOARP;
-	}
 		port->mode = nval;
 	}
+	return 0;
+
+fail:
+	/* Undo the flags changes that have been done so far. */
+	list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
+		flags = ipvlan->dev->flags;
+		if (port->mode == IPVLAN_MODE_L3 ||
+		    port->mode == IPVLAN_MODE_L3S)
+			dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+		else
+			dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+	}
+
 	return err;
 }
 
@@ -594,7 +614,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
 	ipvlan->phy_dev = phy_dev;
 	ipvlan->dev = dev;
 	ipvlan->sfeatures = IPVLAN_FEATURES;
-	ipvlan_adjust_mtu(ipvlan, phy_dev);
+	if (!tb[IFLA_MTU])
+		ipvlan_adjust_mtu(ipvlan, phy_dev);
 	INIT_LIST_HEAD(&ipvlan->addrs);
 	spin_lock_init(&ipvlan->addrs_lock);
 
@@ -693,6 +714,7 @@ void ipvlan_link_setup(struct net_device *dev)
 {
 	ether_setup(dev);
 
+	dev->max_mtu = ETH_MAX_MTU;
 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
 	dev->netdev_ops = &ipvlan_netdev_ops;
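ipvlan_set_port_mode() above now toggles IFF_NOARP through dev_change_flags(), which can fail, so the fail path walks back over the devices already updated and restores their flags. A sketch of that apply-or-rollback loop (failure is injected on one element for the demonstration):

#include <stdio.h>

#define N 4

/* Simulated per-device flag update; fails for one device. */
static int change_flags(int dev, int on)
{
	if (on && dev == 2)
		return -1;      /* injected failure */
	printf("dev %d -> %s\n", dev, on ? "NOARP" : "ARP");
	return 0;
}

static int set_mode_all(void)
{
	int i;

	for (i = 0; i < N; i++)
		if (change_flags(i, 1) != 0)
			goto fail;
	return 0;

fail:
	/* Undo only the devices updated so far, in reverse order,
	 * like list_for_each_entry_continue_reverse() in the driver. */
	while (--i >= 0)
		change_flags(i, 0);
	return -1;
}

int main(void)
{
	return set_mode_all() ? 1 : 0;
}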
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 83f7420ddea5..4f390fa557e4 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -527,7 +527,7 @@ static int net_failover_slave_register(struct net_device *slave_dev,
 
 	netif_addr_lock_bh(failover_dev);
 	dev_uc_sync_multiple(slave_dev, failover_dev);
-	dev_uc_sync_multiple(slave_dev, failover_dev);
+	dev_mc_sync_multiple(slave_dev, failover_dev);
 	netif_addr_unlock_bh(failover_dev);
 
 	err = vlan_vids_add_by_dev(slave_dev, failover_dev);
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 081d99aa3985..49ac678eb2dc 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
 		if (err < 0)
 			return err;
 
-		err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
+		err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
 	}
 
 	return err;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index de51e8f70f44..ce61231e96ea 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = {
 	.socketpair	= sock_no_socketpair,
 	.accept		= sock_no_accept,
 	.getname	= pppoe_getname,
-	.poll_mask	= datagram_poll_mask,
+	.poll		= datagram_poll,
 	.listen		= sock_no_listen,
 	.shutdown	= sock_no_shutdown,
 	.setsockopt	= sock_no_setsockopt,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b0e8b9613054..1eaec648bd1f 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -967,8 +967,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 
 	atomic_set(&ctx->stop, 1);
 
-	if (hrtimer_active(&ctx->tx_timer))
-		hrtimer_cancel(&ctx->tx_timer);
+	hrtimer_cancel(&ctx->tx_timer);
 
 	tasklet_kill(&ctx->bh);
 
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 8dff87ec6d99..2e4130746c40 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -64,6 +64,7 @@
 #define DEFAULT_RX_CSUM_ENABLE		(true)
 #define DEFAULT_TSO_CSUM_ENABLE		(true)
 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
+#define DEFAULT_VLAN_RX_OFFLOAD		(true)
 #define TX_OVERHEAD			(8)
 #define RXW_PADDING			2
 
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
 	if ((ll_mtu % dev->maxpacket) == 0)
 		return -EDOM;
 
-	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
 
 	netdev->mtu = new_mtu;
 
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev,
 	}
 
 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
+	else
+		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
 	else
 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
 	buf |= FCT_TX_CTL_EN_;
 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
 
-	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+	ret = lan78xx_set_rx_max_frame_length(dev,
+					      dev->net->mtu + VLAN_ETH_HLEN);
 
 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
 	buf |= MAC_RX_RXEN_;
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
 	if (DEFAULT_TSO_CSUM_ENABLE)
 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
 
+	if (DEFAULT_VLAN_RX_OFFLOAD)
+		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+	if (DEFAULT_VLAN_FILTER_ENABLE)
+		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
 	dev->net->hw_features = dev->net->features;
 
 	ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
 				    struct sk_buff *skb,
 				    u32 rx_cmd_a, u32 rx_cmd_b)
 {
+	/* HW Checksum offload appears to be flawed if used when not stripping
+	 * VLAN headers. Drop back to S/W checksums under these conditions.
+	 */
 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
-	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
+	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
 		skb->ip_summed = CHECKSUM_NONE;
 	} else {
 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
 	}
 }
 
+static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
+				    struct sk_buff *skb,
+				    u32 rx_cmd_a, u32 rx_cmd_b)
+{
+	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    (rx_cmd_a & RX_CMD_A_FVTG_))
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       (rx_cmd_b & 0xffff));
+}
+
 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
 {
 	int status;
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
 			if (skb->len == size) {
 				lan78xx_rx_csum_offload(dev, skb,
 							rx_cmd_a, rx_cmd_b);
+				lan78xx_rx_vlan_offload(dev, skb,
+							rx_cmd_a, rx_cmd_b);
 
 				skb_trim(skb, skb->len - 4); /* remove fcs */
 				skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
 			skb_set_tail_pointer(skb2, size);
 
 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
 
 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
 			skb2->truesize = size + sizeof(struct sk_buff);
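lan78xx_rx_vlan_offload() above hands the hardware-stripped tag to the stack only when the frame actually carried one (RX_CMD_A_FVTG_) and NETIF_F_HW_VLAN_CTAG_RX is enabled, taking the TCI from the low 16 bits of rx_cmd_b. A sketch of that extraction test (the bit positions below are illustrative, not the device's):

#include <stdio.h>
#include <stdint.h>

#define F_CTAG_RX  (1u << 0)     /* illustrative feature bit */
#define CMD_A_FVTG (1u << 27)    /* illustrative "frame had VLAN tag" flag */

static int rx_vlan_tag(uint32_t features, uint32_t rx_cmd_a,
		       uint32_t rx_cmd_b, uint16_t *tci)
{
	if ((features & F_CTAG_RX) && (rx_cmd_a & CMD_A_FVTG)) {
		*tci = rx_cmd_b & 0xffff;  /* TCI in the low 16 bits */
		return 1;
	}
	return 0;
}

int main(void)
{
	uint16_t tci;

	if (rx_vlan_tag(F_CTAG_RX, CMD_A_FVTG, 0x00010064, &tci))
		printf("tagged: vid=%u prio=%u\n", tci & 0xfff, tci >> 13);
	return 0;
}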
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8e8b51f171f4..8fac8e132c5b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1246,6 +1246,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
+	{QMI_FIXED_INTF(0x413c, 0x81d7, 1)},	/* Dell Wireless 5821e */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},	/* HP lt4120 Snapdragon X5 LTE */
 	{QMI_FIXED_INTF(0x22de, 0x9061, 3)},	/* WeTelecom WPD-600N */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 86f7196f9d91..2a58607a6aea 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev)
 #ifdef CONFIG_PM_SLEEP
 	unregister_pm_notifier(&tp->pm_notifier);
 #endif
-	napi_disable(&tp->napi);
+	if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+		napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b6c9a2af3732..53085c63277b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
53/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ 53/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
54#define VIRTIO_XDP_HEADROOM 256 54#define VIRTIO_XDP_HEADROOM 256
55 55
56/* Separating two types of XDP xmit */
57#define VIRTIO_XDP_TX BIT(0)
58#define VIRTIO_XDP_REDIR BIT(1)
59
56/* RX packet size EWMA. The average packet size is used to determine the packet 60/* RX packet size EWMA. The average packet size is used to determine the packet
57 * buffer size when refilling RX rings. As the entire RX ring may be refilled 61 * buffer size when refilling RX rings. As the entire RX ring may be refilled
58 * at once, the weight is chosen so that the EWMA will be insensitive to short- 62 * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
582 struct receive_queue *rq, 586 struct receive_queue *rq,
583 void *buf, void *ctx, 587 void *buf, void *ctx,
584 unsigned int len, 588 unsigned int len,
585 bool *xdp_xmit) 589 unsigned int *xdp_xmit)
586{ 590{
587 struct sk_buff *skb; 591 struct sk_buff *skb;
588 struct bpf_prog *xdp_prog; 592 struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
654 trace_xdp_exception(vi->dev, xdp_prog, act); 658 trace_xdp_exception(vi->dev, xdp_prog, act);
655 goto err_xdp; 659 goto err_xdp;
656 } 660 }
657 *xdp_xmit = true; 661 *xdp_xmit |= VIRTIO_XDP_TX;
658 rcu_read_unlock(); 662 rcu_read_unlock();
659 goto xdp_xmit; 663 goto xdp_xmit;
660 case XDP_REDIRECT: 664 case XDP_REDIRECT:
661 err = xdp_do_redirect(dev, &xdp, xdp_prog); 665 err = xdp_do_redirect(dev, &xdp, xdp_prog);
662 if (err) 666 if (err)
663 goto err_xdp; 667 goto err_xdp;
664 *xdp_xmit = true; 668 *xdp_xmit |= VIRTIO_XDP_REDIR;
665 rcu_read_unlock(); 669 rcu_read_unlock();
666 goto xdp_xmit; 670 goto xdp_xmit;
667 default: 671 default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
723 void *buf, 727 void *buf,
724 void *ctx, 728 void *ctx,
725 unsigned int len, 729 unsigned int len,
726 bool *xdp_xmit) 730 unsigned int *xdp_xmit)
727{ 731{
728 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 732 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
729 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 733 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
818 put_page(xdp_page); 822 put_page(xdp_page);
819 goto err_xdp; 823 goto err_xdp;
820 } 824 }
821 *xdp_xmit = true; 825 *xdp_xmit |= VIRTIO_XDP_TX;
822 if (unlikely(xdp_page != page)) 826 if (unlikely(xdp_page != page))
823 put_page(page); 827 put_page(page);
824 rcu_read_unlock(); 828 rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
830 put_page(xdp_page); 834 put_page(xdp_page);
831 goto err_xdp; 835 goto err_xdp;
832 } 836 }
833 *xdp_xmit = true; 837 *xdp_xmit |= VIRTIO_XDP_REDIR;
834 if (unlikely(xdp_page != page)) 838 if (unlikely(xdp_page != page))
835 put_page(page); 839 put_page(page);
836 rcu_read_unlock(); 840 rcu_read_unlock();
@@ -939,7 +943,8 @@ xdp_xmit:
939} 943}
940 944
941static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, 945static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
942 void *buf, unsigned int len, void **ctx, bool *xdp_xmit) 946 void *buf, unsigned int len, void **ctx,
947 unsigned int *xdp_xmit)
943{ 948{
944 struct net_device *dev = vi->dev; 949 struct net_device *dev = vi->dev;
945 struct sk_buff *skb; 950 struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
1232 } 1237 }
1233} 1238}
1234 1239
1235static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) 1240static int virtnet_receive(struct receive_queue *rq, int budget,
1241 unsigned int *xdp_xmit)
1236{ 1242{
1237 struct virtnet_info *vi = rq->vq->vdev->priv; 1243 struct virtnet_info *vi = rq->vq->vdev->priv;
1238 unsigned int len, received = 0, bytes = 0; 1244 unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1321 struct virtnet_info *vi = rq->vq->vdev->priv; 1327 struct virtnet_info *vi = rq->vq->vdev->priv;
1322 struct send_queue *sq; 1328 struct send_queue *sq;
1323 unsigned int received, qp; 1329 unsigned int received, qp;
1324 bool xdp_xmit = false; 1330 unsigned int xdp_xmit = 0;
1325 1331
1326 virtnet_poll_cleantx(rq); 1332 virtnet_poll_cleantx(rq);
1327 1333
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1331 if (received < budget) 1337 if (received < budget)
1332 virtqueue_napi_complete(napi, rq->vq, received); 1338 virtqueue_napi_complete(napi, rq->vq, received);
1333 1339
1334 if (xdp_xmit) { 1340 if (xdp_xmit & VIRTIO_XDP_REDIR)
1341 xdp_do_flush_map();
1342
1343 if (xdp_xmit & VIRTIO_XDP_TX) {
1335 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + 1344 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
1336 smp_processor_id(); 1345 smp_processor_id();
1337 sq = &vi->sq[qp]; 1346 sq = &vi->sq[qp];
1338 virtqueue_kick(sq->vq); 1347 virtqueue_kick(sq->vq);
1339 xdp_do_flush_map();
1340 } 1348 }
1341 1349
1342 return received; 1350 return received;
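
[Annotation] The virtio_net hunks above replace the single bool xdp_xmit with a two-bit mask so the NAPI poll loop can tell "packets queued on our own XDP TX ring" apart from "packets handed to xdp_do_redirect()", and so the redirect maps get flushed even when no local TX happened. A minimal sketch of the resulting poll tail, assuming the kernel's BIT(), xdp_do_flush_map() and virtqueue_kick() plus the driver's own struct send_queue:

#define VIRTIO_XDP_TX    BIT(0)  /* packets queued on our own XDP TX ring */
#define VIRTIO_XDP_REDIR BIT(1)  /* packets handed to xdp_do_redirect() */

static void virtnet_poll_tail_sketch(unsigned int xdp_xmit,
                                     struct send_queue *sq)
{
        /* flush redirect maps whenever any redirect happened ... */
        if (xdp_xmit & VIRTIO_XDP_REDIR)
                xdp_do_flush_map();

        /* ... and kick the TX virtqueue only if we actually used it */
        if (xdp_xmit & VIRTIO_XDP_TX)
                virtqueue_kick(sq->vq);
}

The receive handlers OR the matching bit into *xdp_xmit instead of overwriting a bool, so a single poll run can record both kinds of transmit.
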
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index aee0e60471f1..f6bb1d54d4bd 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
623 flush = 0; 623 flush = 0;
624 624
625out: 625out:
626 skb_gro_remcsum_cleanup(skb, &grc); 626 skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
627 skb->remcsum_offload = 0;
628 NAPI_GRO_CB(skb)->flush |= flush;
629 627
630 return pp; 628 return pp;
631} 629}
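
[Annotation] The three open-coded cleanup lines in vxlan_gro_receive() collapse into skb_gro_flush_final_remcsum(). For context only (the helper lives in include/linux/netdevice.h, not in this patch), its body around this kernel version is approximately the following; the -EINPROGRESS check skips the final flush when a lower GRO callback (e.g. ESP offload) has taken ownership of the skb:

static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
                                               struct sk_buff **pp,
                                               int flush,
                                               struct gro_remcsum *grc)
{
        if (PTR_ERR(pp) != -EINPROGRESS) {
                NAPI_GRO_CB(skb)->flush |= flush;
                skb_gro_remcsum_cleanup(skb, grc);
                skb->remcsum_offload = 0;
        }
}
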
diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig
index 9d99eb42d917..6acba67bca07 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Kconfig
+++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig
@@ -60,7 +60,6 @@ config BRCMFMAC_PCIE
60 bool "PCIE bus interface support for FullMAC driver" 60 bool "PCIE bus interface support for FullMAC driver"
61 depends on BRCMFMAC 61 depends on BRCMFMAC
62 depends on PCI 62 depends on PCI
63 depends on HAS_DMA
64 select BRCMFMAC_PROTO_MSGBUF 63 select BRCMFMAC_PROTO_MSGBUF
65 select FW_LOADER 64 select FW_LOADER
66 ---help--- 65 ---help---
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
index 025fa6018550..8d1492a90bd1 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig
+++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
@@ -7,7 +7,7 @@ config QTNFMAC
7config QTNFMAC_PEARL_PCIE 7config QTNFMAC_PEARL_PCIE
8 tristate "Quantenna QSR10g PCIe support" 8 tristate "Quantenna QSR10g PCIe support"
9 default n 9 default n
10 depends on HAS_DMA && PCI && CFG80211 10 depends on PCI && CFG80211
11 select QTNFMAC 11 select QTNFMAC
12 select FW_LOADER 12 select FW_LOADER
13 select CRC32 13 select CRC32
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 922ce0abf5cf..a57daecf1d57 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1810,7 +1810,7 @@ static int talk_to_netback(struct xenbus_device *dev,
1810 err = xen_net_read_mac(dev, info->netdev->dev_addr); 1810 err = xen_net_read_mac(dev, info->netdev->dev_addr);
1811 if (err) { 1811 if (err) {
1812 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 1812 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1813 goto out; 1813 goto out_unlocked;
1814 } 1814 }
1815 1815
1816 rtnl_lock(); 1816 rtnl_lock();
@@ -1925,6 +1925,7 @@ abort_transaction_no_dev_fatal:
1925 xennet_destroy_queues(info); 1925 xennet_destroy_queues(info);
1926 out: 1926 out:
1927 rtnl_unlock(); 1927 rtnl_unlock();
1928out_unlocked:
1928 device_unregister(&dev->dev); 1929 device_unregister(&dev->dev);
1929 return err; 1930 return err;
1930} 1931}
@@ -1950,10 +1951,6 @@ static int xennet_connect(struct net_device *dev)
1950 /* talk_to_netback() sets the correct number of queues */ 1951 /* talk_to_netback() sets the correct number of queues */
1951 num_queues = dev->real_num_tx_queues; 1952 num_queues = dev->real_num_tx_queues;
1952 1953
1953 rtnl_lock();
1954 netdev_update_features(dev);
1955 rtnl_unlock();
1956
1957 if (dev->reg_state == NETREG_UNINITIALIZED) { 1954 if (dev->reg_state == NETREG_UNINITIALIZED) {
1958 err = register_netdev(dev); 1955 err = register_netdev(dev);
1959 if (err) { 1956 if (err) {
@@ -1963,6 +1960,10 @@ static int xennet_connect(struct net_device *dev)
1963 } 1960 }
1964 } 1961 }
1965 1962
1963 rtnl_lock();
1964 netdev_update_features(dev);
1965 rtnl_unlock();
1966
1966 /* 1967 /*
1967 * All public and private state should now be sane. Get 1968 * All public and private state should now be sane. Get
1968 * ready to start sending and receiving packets and give the driver 1969 * ready to start sending and receiving packets and give the driver
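
[Annotation] Two xen-netfront fixes ride together here: an error raised before rtnl_lock() must not fall into rtnl_unlock(), hence the new out_unlocked label, and netdev_update_features() moves after register_netdev() so it no longer runs on an unregistered device. A condensed sketch of the corrected unwind, with the unrelated setup elided:

static int talk_to_netback_sketch(struct xenbus_device *dev,
                                  struct netfront_info *info)
{
        int err;

        err = xen_net_read_mac(dev, info->netdev->dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
                goto out_unlocked;      /* rtnl is not held yet */
        }

        rtnl_lock();
        /* ... queue setup and xenbus transaction, errors goto out ... */
 out:
        rtnl_unlock();                  /* locked paths unlock first ... */
 out_unlocked:
        device_unregister(&dev->dev);   /* ... then join the common tail */
        return err;
}
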
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index d5553c47014f..5d823e965883 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb)
74 struct sk_buff *skb = NULL; 74 struct sk_buff *skb = NULL;
75 75
76 if (!urb->status) { 76 if (!urb->status) {
77 skb = alloc_skb(urb->actual_length, GFP_KERNEL); 77 skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
78 if (!skb) { 78 if (!skb) {
79 nfc_err(&phy->udev->dev, "failed to alloc memory\n"); 79 nfc_err(&phy->udev->dev, "failed to alloc memory\n");
80 } else { 80 } else {
@@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
186 186
187 if (dev->protocol_type == PN533_PROTO_REQ_RESP) { 187 if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
188 /* request for response for sent packet directly */ 188 /* request for response for sent packet directly */
189 rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC); 189 rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
190 if (rc) 190 if (rc)
191 goto error; 191 goto error;
192 } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) { 192 } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
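
[Annotation] The two pn533 GFP swaps are about calling context: pn533_recv_response() is a URB completion callback and runs in atomic (interrupt) context, where a GFP_KERNEL allocation may sleep and deadlock, while pn533_usb_send_frame() runs in process context, where GFP_ATOMIC needlessly forgoes reclaim. A minimal illustration of the rule, with hypothetical function names:

/* completion handler: atomic context, must not sleep */
static void recv_complete_sketch(struct urb *urb)
{
        struct sk_buff *skb = alloc_skb(urb->actual_length, GFP_ATOMIC);

        if (!skb)
                return; /* drop: we cannot wait for reclaim here */
        /* ... copy urb->transfer_buffer into the skb ... */
}

/* submission path: process context, a sleeping allocation is fine */
static int send_frame_sketch(struct pn533_usb_phy *phy)
{
        return pn533_submit_urb_for_response(phy, GFP_KERNEL);
}
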
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 2e96b34bc936..fb667bf469c7 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
278 return -EIO; 278 return -EIO;
279 if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0) 279 if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
280 return -EIO; 280 return -EIO;
281 return 0;
281 } 282 }
282 283
283 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { 284 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
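
[Annotation] The added return 0 matters because this is the read branch of nsio_rw_bytes(): without it, a successful memcpy_mcsafe() fell through into the write path below. Shape of the fixed branch, condensed from the hunk (surrounding declarations assumed from the driver):

        if (rw == READ) {
                if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
                        return -EIO;
                if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
                        return -EIO;
                return 0;       /* previously missing: fell through to the write path */
        }
        /* write path continues here ... */
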
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 68940356cad3..8b1fd7f1a224 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev,
414 blk_queue_logical_block_size(q, pmem_sector_size(ndns)); 414 blk_queue_logical_block_size(q, pmem_sector_size(ndns));
415 blk_queue_max_hw_sectors(q, UINT_MAX); 415 blk_queue_max_hw_sectors(q, UINT_MAX);
416 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 416 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
417 blk_queue_flag_set(QUEUE_FLAG_DAX, q); 417 if (pmem->pfn_flags & PFN_MAP)
418 blk_queue_flag_set(QUEUE_FLAG_DAX, q);
418 q->queuedata = pmem; 419 q->queuedata = pmem;
419 420
420 disk = alloc_disk_node(0, nid); 421 disk = alloc_disk_node(0, nid);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 21710a7460c8..46df030b2c3f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1808,6 +1808,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1808 u32 max_segments = 1808 u32 max_segments =
1809 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; 1809 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
1810 1810
1811 max_segments = min_not_zero(max_segments, ctrl->max_segments);
1811 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 1812 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1812 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 1813 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1813 } 1814 }
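
[Annotation] min_not_zero() treats 0 as "no limit", so transports that never fill in the new ctrl->max_segments field (it defaults to 0) keep the old computed segment count, while nvme-pci can now cap it. For reference, the kernel macro (include/linux/kernel.h) expands to roughly:

#define min_not_zero(x, y) ({                   \
        typeof(x) __x = (x);                    \
        typeof(y) __y = (y);                    \
        __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
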
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b528a2f5826c..41d45a1b5c62 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2790,6 +2790,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2790 /* re-enable the admin_q so anything new can fast fail */ 2790 /* re-enable the admin_q so anything new can fast fail */
2791 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 2791 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2792 2792
2793 /* resume the io queues so that things will fast fail */
2794 nvme_start_queues(&ctrl->ctrl);
2795
2793 nvme_fc_ctlr_inactive_on_rport(ctrl); 2796 nvme_fc_ctlr_inactive_on_rport(ctrl);
2794} 2797}
2795 2798
@@ -2804,9 +2807,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
2804 * waiting for io to terminate 2807 * waiting for io to terminate
2805 */ 2808 */
2806 nvme_fc_delete_association(ctrl); 2809 nvme_fc_delete_association(ctrl);
2807
2808 /* resume the io queues so that things will fast fail */
2809 nvme_start_queues(nctrl);
2810} 2810}
2811 2811
2812static void 2812static void
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 231807cbc849..0c4a33df3b2f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -170,6 +170,7 @@ struct nvme_ctrl {
170 u64 cap; 170 u64 cap;
171 u32 page_size; 171 u32 page_size;
172 u32 max_hw_sectors; 172 u32 max_hw_sectors;
173 u32 max_segments;
173 u16 oncs; 174 u16 oncs;
174 u16 oacs; 175 u16 oacs;
175 u16 nssa; 176 u16 nssa;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fc33804662e7..ba943f211687 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -38,6 +38,13 @@
38 38
39#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc)) 39#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
40 40
41/*
42 * These can be higher, but we need to ensure that any command doesn't
43 * require an sg allocation that needs more than a page of data.
44 */
45#define NVME_MAX_KB_SZ 4096
46#define NVME_MAX_SEGS 127
47
41static int use_threaded_interrupts; 48static int use_threaded_interrupts;
42module_param(use_threaded_interrupts, int, 0); 49module_param(use_threaded_interrupts, int, 0);
43 50
@@ -100,6 +107,8 @@ struct nvme_dev {
100 struct nvme_ctrl ctrl; 107 struct nvme_ctrl ctrl;
101 struct completion ioq_wait; 108 struct completion ioq_wait;
102 109
110 mempool_t *iod_mempool;
111
103 /* shadow doorbell buffer support: */ 112 /* shadow doorbell buffer support: */
104 u32 *dbbuf_dbs; 113 u32 *dbbuf_dbs;
105 dma_addr_t dbbuf_dbs_dma_addr; 114 dma_addr_t dbbuf_dbs_dma_addr;
@@ -477,10 +486,7 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
477 iod->use_sgl = nvme_pci_use_sgls(dev, rq); 486 iod->use_sgl = nvme_pci_use_sgls(dev, rq);
478 487
479 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { 488 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
480 size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg, 489 iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
481 iod->use_sgl);
482
483 iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
484 if (!iod->sg) 490 if (!iod->sg)
485 return BLK_STS_RESOURCE; 491 return BLK_STS_RESOURCE;
486 } else { 492 } else {
@@ -526,7 +532,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
526 } 532 }
527 533
528 if (iod->sg != iod->inline_sg) 534 if (iod->sg != iod->inline_sg)
529 kfree(iod->sg); 535 mempool_free(iod->sg, dev->iod_mempool);
530} 536}
531 537
532#ifdef CONFIG_BLK_DEV_INTEGRITY 538#ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -2280,6 +2286,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
2280 blk_put_queue(dev->ctrl.admin_q); 2286 blk_put_queue(dev->ctrl.admin_q);
2281 kfree(dev->queues); 2287 kfree(dev->queues);
2282 free_opal_dev(dev->ctrl.opal_dev); 2288 free_opal_dev(dev->ctrl.opal_dev);
2289 mempool_destroy(dev->iod_mempool);
2283 kfree(dev); 2290 kfree(dev);
2284} 2291}
2285 2292
@@ -2289,6 +2296,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
2289 2296
2290 nvme_get_ctrl(&dev->ctrl); 2297 nvme_get_ctrl(&dev->ctrl);
2291 nvme_dev_disable(dev, false); 2298 nvme_dev_disable(dev, false);
2299 nvme_kill_queues(&dev->ctrl);
2292 if (!queue_work(nvme_wq, &dev->remove_work)) 2300 if (!queue_work(nvme_wq, &dev->remove_work))
2293 nvme_put_ctrl(&dev->ctrl); 2301 nvme_put_ctrl(&dev->ctrl);
2294} 2302}
@@ -2333,6 +2341,13 @@ static void nvme_reset_work(struct work_struct *work)
2333 if (result) 2341 if (result)
2334 goto out; 2342 goto out;
2335 2343
2344 /*
2345 * Limit the max command size to prevent iod->sg allocations going
2346 * over a single page.
2347 */
2348 dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
2349 dev->ctrl.max_segments = NVME_MAX_SEGS;
2350
2336 result = nvme_init_identify(&dev->ctrl); 2351 result = nvme_init_identify(&dev->ctrl);
2337 if (result) 2352 if (result)
2338 goto out; 2353 goto out;
@@ -2405,7 +2420,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
2405 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 2420 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
2406 struct pci_dev *pdev = to_pci_dev(dev->dev); 2421 struct pci_dev *pdev = to_pci_dev(dev->dev);
2407 2422
2408 nvme_kill_queues(&dev->ctrl);
2409 if (pci_get_drvdata(pdev)) 2423 if (pci_get_drvdata(pdev))
2410 device_release_driver(&pdev->dev); 2424 device_release_driver(&pdev->dev);
2411 nvme_put_ctrl(&dev->ctrl); 2425 nvme_put_ctrl(&dev->ctrl);
@@ -2509,6 +2523,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2509 int node, result = -ENOMEM; 2523 int node, result = -ENOMEM;
2510 struct nvme_dev *dev; 2524 struct nvme_dev *dev;
2511 unsigned long quirks = id->driver_data; 2525 unsigned long quirks = id->driver_data;
2526 size_t alloc_size;
2512 2527
2513 node = dev_to_node(&pdev->dev); 2528 node = dev_to_node(&pdev->dev);
2514 if (node == NUMA_NO_NODE) 2529 if (node == NUMA_NO_NODE)
@@ -2546,6 +2561,23 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2546 if (result) 2561 if (result)
2547 goto release_pools; 2562 goto release_pools;
2548 2563
2564 /*
2565 * Double check that our mempool alloc size will cover the biggest
2566 * command we support.
2567 */
2568 alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
2569 NVME_MAX_SEGS, true);
2570 WARN_ON_ONCE(alloc_size > PAGE_SIZE);
2571
2572 dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
2573 mempool_kfree,
2574 (void *) alloc_size,
2575 GFP_KERNEL, node);
2576 if (!dev->iod_mempool) {
2577 result = -ENOMEM;
2578 goto release_pools;
2579 }
2580
2549 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 2581 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2550 2582
2551 nvme_get_ctrl(&dev->ctrl); 2583 nvme_get_ctrl(&dev->ctrl);
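
[Annotation] The per-request scatterlist kmalloc moves into a mempool created at probe time: mempool_alloc() tries the normal allocator first and only then dips into the pre-allocated reserve, so with min_nr = 1 at least one request can always make forward progress under memory pressure (important when the device sits under swap). Condensed lifecycle sketch, assuming linux/mempool.h:

        /* probe: reserve one element of alloc_size bytes, NUMA-local */
        dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
                                               mempool_kfree,
                                               (void *) alloc_size,
                                               GFP_KERNEL, node);
        if (!dev->iod_mempool)
                return -ENOMEM;

        /* I/O path: falls back to the reserve instead of failing outright */
        iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
        /* ... build the command, complete the request ... */
        mempool_free(iod->sg, dev->iod_mempool);

        /* teardown */
        mempool_destroy(dev->iod_mempool);

Capping max_hw_sectors/max_segments in nvme_reset_work() is what lets a single-page pool element cover the largest possible command, as the WARN_ON_ONCE in probe documents.
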
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c9424da0d23e..518c5b09038c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
560 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) 560 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
561 return; 561 return;
562 562
563 if (nvme_rdma_queue_idx(queue) == 0) {
564 nvme_rdma_free_qe(queue->device->dev,
565 &queue->ctrl->async_event_sqe,
566 sizeof(struct nvme_command), DMA_TO_DEVICE);
567 }
568
569 nvme_rdma_destroy_queue_ib(queue); 563 nvme_rdma_destroy_queue_ib(queue);
570 rdma_destroy_id(queue->cm_id); 564 rdma_destroy_id(queue->cm_id);
571} 565}
@@ -698,7 +692,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
698 set = &ctrl->tag_set; 692 set = &ctrl->tag_set;
699 memset(set, 0, sizeof(*set)); 693 memset(set, 0, sizeof(*set));
700 set->ops = &nvme_rdma_mq_ops; 694 set->ops = &nvme_rdma_mq_ops;
701 set->queue_depth = nctrl->opts->queue_size; 695 set->queue_depth = nctrl->sqsize + 1;
702 set->reserved_tags = 1; /* fabric connect */ 696 set->reserved_tags = 1; /* fabric connect */
703 set->numa_node = NUMA_NO_NODE; 697 set->numa_node = NUMA_NO_NODE;
704 set->flags = BLK_MQ_F_SHOULD_MERGE; 698 set->flags = BLK_MQ_F_SHOULD_MERGE;
@@ -734,11 +728,15 @@ out:
734static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, 728static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
735 bool remove) 729 bool remove)
736{ 730{
737 nvme_rdma_stop_queue(&ctrl->queues[0]);
738 if (remove) { 731 if (remove) {
739 blk_cleanup_queue(ctrl->ctrl.admin_q); 732 blk_cleanup_queue(ctrl->ctrl.admin_q);
740 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); 733 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
741 } 734 }
735 if (ctrl->async_event_sqe.data) {
736 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
737 sizeof(struct nvme_command), DMA_TO_DEVICE);
738 ctrl->async_event_sqe.data = NULL;
739 }
742 nvme_rdma_free_queue(&ctrl->queues[0]); 740 nvme_rdma_free_queue(&ctrl->queues[0]);
743} 741}
744 742
@@ -755,11 +753,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
755 753
756 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev); 754 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
757 755
756 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
757 sizeof(struct nvme_command), DMA_TO_DEVICE);
758 if (error)
759 goto out_free_queue;
760
758 if (new) { 761 if (new) {
759 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); 762 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
760 if (IS_ERR(ctrl->ctrl.admin_tagset)) { 763 if (IS_ERR(ctrl->ctrl.admin_tagset)) {
761 error = PTR_ERR(ctrl->ctrl.admin_tagset); 764 error = PTR_ERR(ctrl->ctrl.admin_tagset);
762 goto out_free_queue; 765 goto out_free_async_qe;
763 } 766 }
764 767
765 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); 768 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -795,12 +798,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
795 if (error) 798 if (error)
796 goto out_stop_queue; 799 goto out_stop_queue;
797 800
798 error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
799 &ctrl->async_event_sqe, sizeof(struct nvme_command),
800 DMA_TO_DEVICE);
801 if (error)
802 goto out_stop_queue;
803
804 return 0; 801 return 0;
805 802
806out_stop_queue: 803out_stop_queue:
@@ -811,6 +808,9 @@ out_cleanup_queue:
811out_free_tagset: 808out_free_tagset:
812 if (new) 809 if (new)
813 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); 810 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
811out_free_async_qe:
812 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
813 sizeof(struct nvme_command), DMA_TO_DEVICE);
814out_free_queue: 814out_free_queue:
815 nvme_rdma_free_queue(&ctrl->queues[0]); 815 nvme_rdma_free_queue(&ctrl->queues[0]);
816 return error; 816 return error;
@@ -819,7 +819,6 @@ out_free_queue:
819static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, 819static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
820 bool remove) 820 bool remove)
821{ 821{
822 nvme_rdma_stop_io_queues(ctrl);
823 if (remove) { 822 if (remove) {
824 blk_cleanup_queue(ctrl->ctrl.connect_q); 823 blk_cleanup_queue(ctrl->ctrl.connect_q);
825 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset); 824 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
@@ -888,9 +887,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
888 list_del(&ctrl->list); 887 list_del(&ctrl->list);
889 mutex_unlock(&nvme_rdma_ctrl_mutex); 888 mutex_unlock(&nvme_rdma_ctrl_mutex);
890 889
891 kfree(ctrl->queues);
892 nvmf_free_options(nctrl->opts); 890 nvmf_free_options(nctrl->opts);
893free_ctrl: 891free_ctrl:
892 kfree(ctrl->queues);
894 kfree(ctrl); 893 kfree(ctrl);
895} 894}
896 895
@@ -949,6 +948,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
949 return; 948 return;
950 949
951destroy_admin: 950destroy_admin:
951 nvme_rdma_stop_queue(&ctrl->queues[0]);
952 nvme_rdma_destroy_admin_queue(ctrl, false); 952 nvme_rdma_destroy_admin_queue(ctrl, false);
953requeue: 953requeue:
954 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", 954 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
@@ -965,12 +965,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
965 965
966 if (ctrl->ctrl.queue_count > 1) { 966 if (ctrl->ctrl.queue_count > 1) {
967 nvme_stop_queues(&ctrl->ctrl); 967 nvme_stop_queues(&ctrl->ctrl);
968 nvme_rdma_stop_io_queues(ctrl);
968 blk_mq_tagset_busy_iter(&ctrl->tag_set, 969 blk_mq_tagset_busy_iter(&ctrl->tag_set,
969 nvme_cancel_request, &ctrl->ctrl); 970 nvme_cancel_request, &ctrl->ctrl);
970 nvme_rdma_destroy_io_queues(ctrl, false); 971 nvme_rdma_destroy_io_queues(ctrl, false);
971 } 972 }
972 973
973 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 974 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
975 nvme_rdma_stop_queue(&ctrl->queues[0]);
974 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 976 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
975 nvme_cancel_request, &ctrl->ctrl); 977 nvme_cancel_request, &ctrl->ctrl);
976 nvme_rdma_destroy_admin_queue(ctrl, false); 978 nvme_rdma_destroy_admin_queue(ctrl, false);
@@ -1736,6 +1738,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
1736{ 1738{
1737 if (ctrl->ctrl.queue_count > 1) { 1739 if (ctrl->ctrl.queue_count > 1) {
1738 nvme_stop_queues(&ctrl->ctrl); 1740 nvme_stop_queues(&ctrl->ctrl);
1741 nvme_rdma_stop_io_queues(ctrl);
1739 blk_mq_tagset_busy_iter(&ctrl->tag_set, 1742 blk_mq_tagset_busy_iter(&ctrl->tag_set,
1740 nvme_cancel_request, &ctrl->ctrl); 1743 nvme_cancel_request, &ctrl->ctrl);
1741 nvme_rdma_destroy_io_queues(ctrl, shutdown); 1744 nvme_rdma_destroy_io_queues(ctrl, shutdown);
@@ -1747,6 +1750,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
1747 nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); 1750 nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
1748 1751
1749 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 1752 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
1753 nvme_rdma_stop_queue(&ctrl->queues[0]);
1750 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 1754 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
1751 nvme_cancel_request, &ctrl->ctrl); 1755 nvme_cancel_request, &ctrl->ctrl);
1752 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 1756 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1932,11 +1936,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
1932 goto out_free_ctrl; 1936 goto out_free_ctrl;
1933 } 1937 }
1934 1938
1935 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
1936 0 /* no quirks, we're perfect! */);
1937 if (ret)
1938 goto out_free_ctrl;
1939
1940 INIT_DELAYED_WORK(&ctrl->reconnect_work, 1939 INIT_DELAYED_WORK(&ctrl->reconnect_work,
1941 nvme_rdma_reconnect_ctrl_work); 1940 nvme_rdma_reconnect_ctrl_work);
1942 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); 1941 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
@@ -1950,14 +1949,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
1950 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), 1949 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
1951 GFP_KERNEL); 1950 GFP_KERNEL);
1952 if (!ctrl->queues) 1951 if (!ctrl->queues)
1953 goto out_uninit_ctrl; 1952 goto out_free_ctrl;
1953
1954 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
1955 0 /* no quirks, we're perfect! */);
1956 if (ret)
1957 goto out_kfree_queues;
1954 1958
1955 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); 1959 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
1956 WARN_ON_ONCE(!changed); 1960 WARN_ON_ONCE(!changed);
1957 1961
1958 ret = nvme_rdma_configure_admin_queue(ctrl, true); 1962 ret = nvme_rdma_configure_admin_queue(ctrl, true);
1959 if (ret) 1963 if (ret)
1960 goto out_kfree_queues; 1964 goto out_uninit_ctrl;
1961 1965
1962 /* sanity check icdoff */ 1966 /* sanity check icdoff */
1963 if (ctrl->ctrl.icdoff) { 1967 if (ctrl->ctrl.icdoff) {
@@ -1974,20 +1978,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
1974 goto out_remove_admin_queue; 1978 goto out_remove_admin_queue;
1975 } 1979 }
1976 1980
1977 if (opts->queue_size > ctrl->ctrl.maxcmd) { 1981 /* only warn if argument is too large here, will clamp later */
1978 /* warn if maxcmd is lower than queue_size */
1979 dev_warn(ctrl->ctrl.device,
1980 "queue_size %zu > ctrl maxcmd %u, clamping down\n",
1981 opts->queue_size, ctrl->ctrl.maxcmd);
1982 opts->queue_size = ctrl->ctrl.maxcmd;
1983 }
1984
1985 if (opts->queue_size > ctrl->ctrl.sqsize + 1) { 1982 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
1986 /* warn if sqsize is lower than queue_size */
1987 dev_warn(ctrl->ctrl.device, 1983 dev_warn(ctrl->ctrl.device,
1988 "queue_size %zu > ctrl sqsize %u, clamping down\n", 1984 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1989 opts->queue_size, ctrl->ctrl.sqsize + 1); 1985 opts->queue_size, ctrl->ctrl.sqsize + 1);
1990 opts->queue_size = ctrl->ctrl.sqsize + 1; 1986 }
1987
1988 /* warn if maxcmd is lower than sqsize+1 */
1989 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
1990 dev_warn(ctrl->ctrl.device,
1991 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1992 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
1993 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
1991 } 1994 }
1992 1995
1993 if (opts->nr_io_queues) { 1996 if (opts->nr_io_queues) {
@@ -2013,15 +2016,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
2013 return &ctrl->ctrl; 2016 return &ctrl->ctrl;
2014 2017
2015out_remove_admin_queue: 2018out_remove_admin_queue:
2019 nvme_rdma_stop_queue(&ctrl->queues[0]);
2016 nvme_rdma_destroy_admin_queue(ctrl, true); 2020 nvme_rdma_destroy_admin_queue(ctrl, true);
2017out_kfree_queues:
2018 kfree(ctrl->queues);
2019out_uninit_ctrl: 2021out_uninit_ctrl:
2020 nvme_uninit_ctrl(&ctrl->ctrl); 2022 nvme_uninit_ctrl(&ctrl->ctrl);
2021 nvme_put_ctrl(&ctrl->ctrl); 2023 nvme_put_ctrl(&ctrl->ctrl);
2022 if (ret > 0) 2024 if (ret > 0)
2023 ret = -EIO; 2025 ret = -EIO;
2024 return ERR_PTR(ret); 2026 return ERR_PTR(ret);
2027out_kfree_queues:
2028 kfree(ctrl->queues);
2025out_free_ctrl: 2029out_free_ctrl:
2026 kfree(ctrl); 2030 kfree(ctrl);
2027 return ERR_PTR(ret); 2031 return ERR_PTR(ret);
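
[Annotation] The rdma.c rework makes stopping a queue (draining the QP) an explicit caller step instead of a side effect of destroy, closing double-stop and use-after-free windows in the recovery paths. The resulting order, sketched with the names from the hunks above:

static void admin_teardown_order_sketch(struct nvme_rdma_ctrl *ctrl)
{
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);       /* 1. block new I/O */
        nvme_rdma_stop_queue(&ctrl->queues[0]);         /* 2. drain the QP */
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request,
                                &ctrl->ctrl);           /* 3. fail in-flight */
        nvme_rdma_destroy_admin_queue(ctrl, false);     /* 4. free resources */
}

The async-event SQE follows the same discipline: it is now allocated once before the tag set exists and freed in destroy with its data pointer NULLed, so a second teardown pass is a no-op.
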
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a03da764ecae..74d4b785d2da 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -686,6 +686,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
686 } 686 }
687 687
688 ctrl->csts = NVME_CSTS_RDY; 688 ctrl->csts = NVME_CSTS_RDY;
689
690 /*
691 * Controllers that are not yet enabled should not really enforce the
692 * keep alive timeout, but we still want to track a timeout and cleanup
693 * in case a host died before it enabled the controller. Hence, simply
694 * reset the keep alive timer when the controller is enabled.
695 */
696 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
689} 697}
690 698
691static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) 699static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
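
[Annotation] mod_delayed_work() is the right primitive for the keep-alive reset because, unlike queue_delayed_work(), it re-arms a work item that is already pending:

/* sketch: push the keep-alive deadline out to a full KATO from now,
 * whether or not the timer armed at connect time is still queued */
static void nvmet_ka_touch_sketch(struct nvmet_ctrl *ctrl)
{
        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

queue_delayed_work() would be a no-op while the connect-time timer is still pending, leaving a stale, shorter deadline in place.
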
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index b5b0cdc21d01..514d1dfc5630 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -936,6 +936,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
936 return cell; 936 return cell;
937 } 937 }
938 938
939 /* NULL cell_id only allowed for device tree; invalid otherwise */
940 if (!cell_id)
941 return ERR_PTR(-EINVAL);
942
939 return nvmem_cell_get_from_list(cell_id); 943 return nvmem_cell_get_from_list(cell_id);
940} 944}
941EXPORT_SYMBOL_GPL(nvmem_cell_get); 945EXPORT_SYMBOL_GPL(nvmem_cell_get);
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index ab2f3fead6b1..31ff03dbeb83 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -598,7 +598,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
598 } 598 }
599 599
600 /* Scaling up? Scale voltage before frequency */ 600 /* Scaling up? Scale voltage before frequency */
601 if (freq > old_freq) { 601 if (freq >= old_freq) {
602 ret = _set_opp_voltage(dev, reg, new_supply); 602 ret = _set_opp_voltage(dev, reg, new_supply);
603 if (ret) 603 if (ret)
604 goto restore_voltage; 604 goto restore_voltage;
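
[Annotation] Switching > to >= means an OPP transition to the same frequency still takes the scale-up ordering, so a voltage-only change programs the regulator before (re)setting the clock and never briefly runs undervolted:

        /* treat freq == old_freq as scaling up: voltage first */
        if (freq >= old_freq) {
                ret = _set_opp_voltage(dev, reg, new_supply);
                if (ret)
                        goto restore_voltage;
        }
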
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 535201984b8b..1b2cfe51e8d7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -28,10 +28,10 @@ obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
28obj-$(CONFIG_PCI_ECAM) += ecam.o 28obj-$(CONFIG_PCI_ECAM) += ecam.o
29obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o 29obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
30 30
31obj-y += controller/
32obj-y += switch/
33
34# Endpoint library must be initialized before its users 31# Endpoint library must be initialized before its users
35obj-$(CONFIG_PCI_ENDPOINT) += endpoint/ 32obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
36 33
34obj-y += controller/
35obj-y += switch/
36
37ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG 37ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 18fa09b3ac8f..cc9fa02d32a0 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -96,7 +96,6 @@ config PCI_HOST_GENERIC
96 depends on OF 96 depends on OF
97 select PCI_HOST_COMMON 97 select PCI_HOST_COMMON
98 select IRQ_DOMAIN 98 select IRQ_DOMAIN
99 select PCI_DOMAINS
100 help 99 help
101 Say Y here if you want to support a simple generic PCI host 100 Say Y here if you want to support a simple generic PCI host
102 controller, such as the one emulated by kvmtool. 101 controller, such as the one emulated by kvmtool.
@@ -138,7 +137,6 @@ config PCI_VERSATILE
138 137
139config PCIE_IPROC 138config PCIE_IPROC
140 tristate 139 tristate
141 select PCI_DOMAINS
142 help 140 help
143 This enables the iProc PCIe core controller support for Broadcom's 141 This enables the iProc PCIe core controller support for Broadcom's
144 iProc family of SoCs. An appropriate bus interface driver needs 142 iProc family of SoCs. An appropriate bus interface driver needs
@@ -176,7 +174,6 @@ config PCIE_IPROC_MSI
176config PCIE_ALTERA 174config PCIE_ALTERA
177 bool "Altera PCIe controller" 175 bool "Altera PCIe controller"
178 depends on ARM || NIOS2 || COMPILE_TEST 176 depends on ARM || NIOS2 || COMPILE_TEST
179 select PCI_DOMAINS
180 help 177 help
181 Say Y here if you want to enable PCIe controller support on Altera 178 Say Y here if you want to enable PCIe controller support on Altera
182 FPGA. 179 FPGA.
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 16f52c626b4b..91b0194240a5 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -58,7 +58,6 @@ config PCIE_DW_PLAT_HOST
58 depends on PCI && PCI_MSI_IRQ_DOMAIN 58 depends on PCI && PCI_MSI_IRQ_DOMAIN
59 select PCIE_DW_HOST 59 select PCIE_DW_HOST
60 select PCIE_DW_PLAT 60 select PCIE_DW_PLAT
61 default y
62 help 61 help
63 Enables support for the PCIe controller in the Designware IP to 62 Enables support for the PCIe controller in the Designware IP to
64 work in host mode. There are two instances of PCIe controller in 63 work in host mode. There are two instances of PCIe controller in
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index a1ebe9ed441f..20bb2564a6b3 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -355,11 +355,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
355 irq = of_irq_get(intc, 0); 355 irq = of_irq_get(intc, 0);
356 if (irq <= 0) { 356 if (irq <= 0) {
357 dev_err(p->dev, "failed to get parent IRQ\n"); 357 dev_err(p->dev, "failed to get parent IRQ\n");
358 of_node_put(intc);
358 return irq ?: -EINVAL; 359 return irq ?: -EINVAL;
359 } 360 }
360 361
361 p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, 362 p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
362 &faraday_pci_irqdomain_ops, p); 363 &faraday_pci_irqdomain_ops, p);
364 of_node_put(intc);
363 if (!p->irqdomain) { 365 if (!p->irqdomain) {
364 dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); 366 dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
365 return -EINVAL; 367 return -EINVAL;
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index 874d75c9ee4a..c8febb009454 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -680,7 +680,11 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
680 if (err) 680 if (err)
681 return err; 681 return err;
682 682
683 return phy_power_on(pcie->phy); 683 err = phy_power_on(pcie->phy);
684 if (err)
685 phy_exit(pcie->phy);
686
687 return err;
684} 688}
685 689
686static int rcar_msi_alloc(struct rcar_msi *chip) 690static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -1165,7 +1169,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
1165 if (rcar_pcie_hw_init(pcie)) { 1169 if (rcar_pcie_hw_init(pcie)) {
1166 dev_info(dev, "PCIe link down\n"); 1170 dev_info(dev, "PCIe link down\n");
1167 err = -ENODEV; 1171 err = -ENODEV;
1168 goto err_clk_disable; 1172 goto err_phy_shutdown;
1169 } 1173 }
1170 1174
1171 data = rcar_pci_read_reg(pcie, MACSR); 1175 data = rcar_pci_read_reg(pcie, MACSR);
@@ -1177,7 +1181,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
1177 dev_err(dev, 1181 dev_err(dev,
1178 "failed to enable MSI support: %d\n", 1182 "failed to enable MSI support: %d\n",
1179 err); 1183 err);
1180 goto err_clk_disable; 1184 goto err_phy_shutdown;
1181 } 1185 }
1182 } 1186 }
1183 1187
@@ -1191,6 +1195,12 @@ err_msi_teardown:
1191 if (IS_ENABLED(CONFIG_PCI_MSI)) 1195 if (IS_ENABLED(CONFIG_PCI_MSI))
1192 rcar_pcie_teardown_msi(pcie); 1196 rcar_pcie_teardown_msi(pcie);
1193 1197
1198err_phy_shutdown:
1199 if (pcie->phy) {
1200 phy_power_off(pcie->phy);
1201 phy_exit(pcie->phy);
1202 }
1203
1194err_clk_disable: 1204err_clk_disable:
1195 clk_disable_unprepare(pcie->bus_clk); 1205 clk_disable_unprepare(pcie->bus_clk);
1196 1206
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 6a4bbb5b3de0..fb32840ce8e6 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -559,7 +559,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
559 PCI_NUM_INTX, 559 PCI_NUM_INTX,
560 &legacy_domain_ops, 560 &legacy_domain_ops,
561 pcie); 561 pcie);
562 562 of_node_put(legacy_intc_node);
563 if (!pcie->legacy_irq_domain) { 563 if (!pcie->legacy_irq_domain) {
564 dev_err(dev, "failed to create IRQ domain\n"); 564 dev_err(dev, "failed to create IRQ domain\n");
565 return -ENOMEM; 565 return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index b110a3a814e3..7b1389d8e2a5 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -509,6 +509,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
509 port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, 509 port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
510 &intx_domain_ops, 510 &intx_domain_ops,
511 port); 511 port);
512 of_node_put(pcie_intc_node);
512 if (!port->leg_domain) { 513 if (!port->leg_domain) {
513 dev_err(dev, "Failed to get a INTx IRQ domain\n"); 514 dev_err(dev, "Failed to get a INTx IRQ domain\n");
514 return -ENODEV; 515 return -ENODEV;
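
[Annotation] The Faraday and both Xilinx hunks fix the same leak: child-node lookups such as of_get_next_child() return the interrupt-controller node with an elevated refcount, and neither of_irq_get() nor irq_domain_add_linear() consumes it (the domain takes its own reference). Balanced pattern, sketched with placeholder ops/priv standing in for each driver's domain ops and context:

        struct device_node *intc = of_get_next_child(np, NULL);
        struct irq_domain *domain;
        int irq;

        if (!intc)
                return -ENODEV;

        irq = of_irq_get(intc, 0);
        if (irq <= 0) {
                of_node_put(intc);      /* error paths drop the reference too */
                return irq ?: -EINVAL;
        }

        domain = irq_domain_add_linear(intc, PCI_NUM_INTX, &ops, priv);
        of_node_put(intc);              /* the domain holds its own reference */
        if (!domain)
                return -EINVAL;
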
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 523a8cab3bfb..bf53fad636a5 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -145,10 +145,10 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
145 */ 145 */
146void pci_epf_unregister_driver(struct pci_epf_driver *driver) 146void pci_epf_unregister_driver(struct pci_epf_driver *driver)
147{ 147{
148 struct config_group *group; 148 struct config_group *group, *tmp;
149 149
150 mutex_lock(&pci_epf_mutex); 150 mutex_lock(&pci_epf_mutex);
151 list_for_each_entry(group, &driver->epf_group, group_entry) 151 list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
152 pci_ep_cfs_remove_epf_group(group); 152 pci_ep_cfs_remove_epf_group(group);
153 list_del(&driver->epf_group); 153 list_del(&driver->epf_group);
154 mutex_unlock(&pci_epf_mutex); 154 mutex_unlock(&pci_epf_mutex);
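
[Annotation] list_for_each_entry_safe() caches the next element before the loop body runs, so the body may unlink or free the current one; the plain iterator would have read group->group_entry.next after pci_ep_cfs_remove_epf_group() released it, a use-after-free:

        struct config_group *group, *tmp;

        list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
                pci_ep_cfs_remove_epf_group(group);     /* may free 'group' */
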
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 3979f89b250a..5bd6c1573295 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -7,7 +7,6 @@
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * Send feedback to <kristen.c.accardi@intel.com> 9 * Send feedback to <kristen.c.accardi@intel.com>
10 *
11 */ 10 */
12 11
13#include <linux/module.h> 12#include <linux/module.h>
@@ -87,8 +86,17 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
87 return 0; 86 return 0;
88 87
89 /* If _OSC exists, we should not evaluate OSHP */ 88 /* If _OSC exists, we should not evaluate OSHP */
89
90 /*
91 * If there's no ACPI host bridge (i.e., ACPI support is compiled
92 * into the kernel but the hardware platform doesn't support ACPI),
93 * there's nothing to do here.
94 */
90 host = pci_find_host_bridge(pdev->bus); 95 host = pci_find_host_bridge(pdev->bus);
91 root = acpi_pci_find_root(ACPI_HANDLE(&host->dev)); 96 root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
97 if (!root)
98 return 0;
99
92 if (root->osc_support_set) 100 if (root->osc_support_set)
93 goto no_control; 101 goto no_control;
94 102
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index d0d73dbbd5ca..0f04ae648cf1 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -575,6 +575,22 @@ void pci_iov_release(struct pci_dev *dev)
575} 575}
576 576
577/** 577/**
578 * pci_iov_remove - clean up SR-IOV state after PF driver is detached
579 * @dev: the PCI device
580 */
581void pci_iov_remove(struct pci_dev *dev)
582{
583 struct pci_sriov *iov = dev->sriov;
584
585 if (!dev->is_physfn)
586 return;
587
588 iov->driver_max_VFs = iov->total_VFs;
589 if (iov->num_VFs)
590 pci_warn(dev, "driver left SR-IOV enabled after remove\n");
591}
592
593/**
578 * pci_iov_update_resource - update a VF BAR 594 * pci_iov_update_resource - update a VF BAR
579 * @dev: the PCI device 595 * @dev: the PCI device
580 * @resno: the resource number 596 * @resno: the resource number
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 65113b6eed14..89ee6a2b6eb8 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -629,6 +629,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
629{ 629{
630 struct acpi_device *adev = ACPI_COMPANION(&dev->dev); 630 struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
631 631
632 /*
633 * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
634 * system-wide suspend/resume confuses the platform firmware, so avoid
635 * doing that, unless the bridge has a driver that should take care of
636 * the PM handling. According to Section 16.1.6 of ACPI 6.2, endpoint
637 * devices are expected to be in D3 before invoking the S3 entry path
638 * from the firmware, so they should not be affected by this issue.
639 */
640 if (pci_is_bridge(dev) && !dev->driver &&
641 acpi_target_system_state() != ACPI_STATE_S0)
642 return true;
643
632 if (!adev || !acpi_device_power_manageable(adev)) 644 if (!adev || !acpi_device_power_manageable(adev))
633 return false; 645 return false;
634 646
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c125d53033c6..6792292b5fc7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -445,6 +445,7 @@ static int pci_device_remove(struct device *dev)
445 } 445 }
446 pcibios_free_irq(pci_dev); 446 pcibios_free_irq(pci_dev);
447 pci_dev->driver = NULL; 447 pci_dev->driver = NULL;
448 pci_iov_remove(pci_dev);
448 } 449 }
449 450
450 /* Undo the runtime PM settings in local_pci_probe() */ 451 /* Undo the runtime PM settings in local_pci_probe() */
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index c358e7a07f3f..882f1f9596df 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -311,6 +311,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
311#ifdef CONFIG_PCI_IOV 311#ifdef CONFIG_PCI_IOV
312int pci_iov_init(struct pci_dev *dev); 312int pci_iov_init(struct pci_dev *dev);
313void pci_iov_release(struct pci_dev *dev); 313void pci_iov_release(struct pci_dev *dev);
314void pci_iov_remove(struct pci_dev *dev);
314void pci_iov_update_resource(struct pci_dev *dev, int resno); 315void pci_iov_update_resource(struct pci_dev *dev, int resno);
315resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 316resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
316void pci_restore_iov_state(struct pci_dev *dev); 317void pci_restore_iov_state(struct pci_dev *dev);
@@ -325,6 +326,9 @@ static inline void pci_iov_release(struct pci_dev *dev)
325 326
326{ 327{
327} 328}
329static inline void pci_iov_remove(struct pci_dev *dev)
330{
331}
328static inline void pci_restore_iov_state(struct pci_dev *dev) 332static inline void pci_restore_iov_state(struct pci_dev *dev)
329{ 333{
330} 334}
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 6bdb1dad805f..0e31f1392a53 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
1463 case PMU_TYPE_IOB: 1463 case PMU_TYPE_IOB:
1464 return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id); 1464 return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
1465 case PMU_TYPE_IOB_SLOW: 1465 case PMU_TYPE_IOB_SLOW:
1466 return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id); 1466 return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
1467 case PMU_TYPE_MCB: 1467 case PMU_TYPE_MCB:
1468 return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id); 1468 return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
1469 case PMU_TYPE_MC: 1469 case PMU_TYPE_MC:
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 76243caa08c6..b5c880b50bb3 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -333,7 +333,7 @@ static int owl_pin_config_set(struct pinctrl_dev *pctrldev,
333 unsigned long flags; 333 unsigned long flags;
334 unsigned int param; 334 unsigned int param;
335 u32 reg, bit, width, arg; 335 u32 reg, bit, width, arg;
336 int ret, i; 336 int ret = 0, i;
337 337
338 info = &pctrl->soc->padinfo[pin]; 338 info = &pctrl->soc->padinfo[pin];
339 339
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index b601039d6c69..c4aa411f5935 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
101} 101}
102 102
103static int dt_to_map_one_config(struct pinctrl *p, 103static int dt_to_map_one_config(struct pinctrl *p,
104 struct pinctrl_dev *pctldev, 104 struct pinctrl_dev *hog_pctldev,
105 const char *statename, 105 const char *statename,
106 struct device_node *np_config) 106 struct device_node *np_config)
107{ 107{
108 struct pinctrl_dev *pctldev = NULL;
108 struct device_node *np_pctldev; 109 struct device_node *np_pctldev;
109 const struct pinctrl_ops *ops; 110 const struct pinctrl_ops *ops;
110 int ret; 111 int ret;
@@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
123 return -EPROBE_DEFER; 124 return -EPROBE_DEFER;
124 } 125 }
125 /* If we're creating a hog we can use the passed pctldev */ 126 /* If we're creating a hog we can use the passed pctldev */
126 if (pctldev && (np_pctldev == p->dev->of_node)) 127 if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
128 pctldev = hog_pctldev;
127 break; 129 break;
130 }
128 pctldev = get_pinctrl_dev_from_of_node(np_pctldev); 131 pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
129 if (pctldev) 132 if (pctldev)
130 break; 133 break;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index ad6da1184c9f..e3f1ab2290fc 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -1459,6 +1459,9 @@ static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
1459 struct mtk_pinctrl *hw = gpiochip_get_data(chip); 1459 struct mtk_pinctrl *hw = gpiochip_get_data(chip);
1460 unsigned long eint_n; 1460 unsigned long eint_n;
1461 1461
1462 if (!hw->eint)
1463 return -ENOTSUPP;
1464
1462 eint_n = offset; 1465 eint_n = offset;
1463 1466
1464 return mtk_eint_find_irq(hw->eint, eint_n); 1467 return mtk_eint_find_irq(hw->eint, eint_n);
@@ -1471,7 +1474,8 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
1471 unsigned long eint_n; 1474 unsigned long eint_n;
1472 u32 debounce; 1475 u32 debounce;
1473 1476
1474 if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) 1477 if (!hw->eint ||
1478 pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
1475 return -ENOTSUPP; 1479 return -ENOTSUPP;
1476 1480
1477 debounce = pinconf_to_config_argument(config); 1481 debounce = pinconf_to_config_argument(config);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index b3799695d8db..16ff56f93501 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1000,11 +1000,6 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
1000 return -ENOMEM; 1000 return -ENOMEM;
1001 1001
1002 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1002 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1003 if (!res) {
1004 dev_err(&pdev->dev, "Unable to get eint resource\n");
1005 return -ENODEV;
1006 }
1007
1008 pctl->eint->base = devm_ioremap_resource(&pdev->dev, res); 1003 pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
1009 if (IS_ERR(pctl->eint->base)) 1004 if (IS_ERR(pctl->eint->base))
1010 return PTR_ERR(pctl->eint->base); 1005 return PTR_ERR(pctl->eint->base);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b3153c095199..e5647dac0818 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1590,8 +1590,11 @@ static int pcs_save_context(struct pcs_device *pcs)
1590 1590
1591 mux_bytes = pcs->width / BITS_PER_BYTE; 1591 mux_bytes = pcs->width / BITS_PER_BYTE;
1592 1592
1593 if (!pcs->saved_vals) 1593 if (!pcs->saved_vals) {
1594 pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC); 1594 pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC);
1595 if (!pcs->saved_vals)
1596 return -ENOMEM;
1597 }
1595 1598
1596 switch (pcs->width) { 1599 switch (pcs->width) {
1597 case 64: 1600 case 64:
@@ -1651,8 +1654,13 @@ static int pinctrl_single_suspend(struct platform_device *pdev,
1651 if (!pcs) 1654 if (!pcs)
1652 return -EINVAL; 1655 return -EINVAL;
1653 1656
1654 if (pcs->flags & PCS_CONTEXT_LOSS_OFF) 1657 if (pcs->flags & PCS_CONTEXT_LOSS_OFF) {
1655 pcs_save_context(pcs); 1658 int ret;
1659
1660 ret = pcs_save_context(pcs);
1661 if (ret < 0)
1662 return ret;
1663 }
1656 1664
1657 return pinctrl_force_sleep(pcs->pctl); 1665 return pinctrl_force_sleep(pcs->pctl);
1658} 1666}
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 767c485af59b..547dbdac9d54 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -221,7 +221,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
221 } 221 }
222 pct = &sysoff->ts[0]; 222 pct = &sysoff->ts[0];
223 for (i = 0; i < sysoff->n_samples; i++) { 223 for (i = 0; i < sysoff->n_samples; i++) {
224 getnstimeofday64(&ts); 224 ktime_get_real_ts64(&ts);
225 pct->sec = ts.tv_sec; 225 pct->sec = ts.tv_sec;
226 pct->nsec = ts.tv_nsec; 226 pct->nsec = ts.tv_nsec;
227 pct++; 227 pct++;
@@ -230,7 +230,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
230 pct->nsec = ts.tv_nsec; 230 pct->nsec = ts.tv_nsec;
231 pct++; 231 pct++;
232 } 232 }
233 getnstimeofday64(&ts); 233 ktime_get_real_ts64(&ts);
234 pct->sec = ts.tv_sec; 234 pct->sec = ts.tv_sec;
235 pct->nsec = ts.tv_nsec; 235 pct->nsec = ts.tv_nsec;
236 if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff))) 236 if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index 1468a1642b49..e8652c148c52 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -374,7 +374,7 @@ static int qoriq_ptp_probe(struct platform_device *dev)
374 pr_err("ioremap ptp registers failed\n"); 374 pr_err("ioremap ptp registers failed\n");
375 goto no_ioremap; 375 goto no_ioremap;
376 } 376 }
377 getnstimeofday64(&now); 377 ktime_get_real_ts64(&now);
378 ptp_qoriq_settime(&qoriq_ptp->caps, &now); 378 ptp_qoriq_settime(&qoriq_ptp->caps, &now);
379 379
380 tmr_ctrl = 380 tmr_ctrl =
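
[Annotation] Both PTP hunks are mechanical y2038 cleanup: getnstimeofday64() was a deprecated alias for ktime_get_real_ts64(), and both fill a struct timespec64, so only the call name changes:

        struct timespec64 ts;

        ktime_get_real_ts64(&ts);       /* CLOCK_REALTIME, 64-bit seconds */
        pct->sec  = ts.tv_sec;
        pct->nsec = ts.tv_nsec;
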
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 73cce3ecb97f..a9f60d0ee02e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -41,6 +41,15 @@
41 41
42#define DASD_DIAG_MOD "dasd_diag_mod" 42#define DASD_DIAG_MOD "dasd_diag_mod"
43 43
44static unsigned int queue_depth = 32;
45static unsigned int nr_hw_queues = 4;
46
47module_param(queue_depth, uint, 0444);
48MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
49
50module_param(nr_hw_queues, uint, 0444);
51MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
52
44/* 53/*
45 * SECTION: exported variables of dasd.c 54 * SECTION: exported variables of dasd.c
46 */ 55 */
@@ -1222,80 +1231,37 @@ static void dasd_hosts_init(struct dentry *base_dentry,
1222 device->hosts_dentry = pde; 1231 device->hosts_dentry = pde;
1223} 1232}
1224 1233
1225/* 1234struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
1226 * Allocate memory for a channel program with 'cplength' channel 1235 struct dasd_device *device,
1227 * command words and 'datasize' additional space. There are two 1236 struct dasd_ccw_req *cqr)
1228 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
1229 * memory and 2) dasd_smalloc_request uses the static ccw memory
1230 * that gets allocated for each device.
1231 */
1232struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
1233 int datasize,
1234 struct dasd_device *device)
1235{
1236 struct dasd_ccw_req *cqr;
1237
1238 /* Sanity checks */
1239 BUG_ON(datasize > PAGE_SIZE ||
1240 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
1241
1242 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
1243 if (cqr == NULL)
1244 return ERR_PTR(-ENOMEM);
1245 cqr->cpaddr = NULL;
1246 if (cplength > 0) {
1247 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
1248 GFP_ATOMIC | GFP_DMA);
1249 if (cqr->cpaddr == NULL) {
1250 kfree(cqr);
1251 return ERR_PTR(-ENOMEM);
1252 }
1253 }
1254 cqr->data = NULL;
1255 if (datasize > 0) {
1256 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
1257 if (cqr->data == NULL) {
1258 kfree(cqr->cpaddr);
1259 kfree(cqr);
1260 return ERR_PTR(-ENOMEM);
1261 }
1262 }
1263 cqr->magic = magic;
1264 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1265 dasd_get_device(device);
1266 return cqr;
1267}
1268EXPORT_SYMBOL(dasd_kmalloc_request);
1269
1270struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
1271 int datasize,
1272 struct dasd_device *device)
1273{ 1237{
1274 unsigned long flags; 1238 unsigned long flags;
1275 struct dasd_ccw_req *cqr; 1239 char *data, *chunk;
1276 char *data; 1240 int size = 0;
1277 int size;
1278 1241
1279 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
1280 if (cplength > 0) 1242 if (cplength > 0)
1281 size += cplength * sizeof(struct ccw1); 1243 size += cplength * sizeof(struct ccw1);
1282 if (datasize > 0) 1244 if (datasize > 0)
1283 size += datasize; 1245 size += datasize;
1246 if (!cqr)
1247 size += (sizeof(*cqr) + 7L) & -8L;
1248
1284 spin_lock_irqsave(&device->mem_lock, flags); 1249 spin_lock_irqsave(&device->mem_lock, flags);
1285 cqr = (struct dasd_ccw_req *) 1250 data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
1286 dasd_alloc_chunk(&device->ccw_chunks, size);
1287 spin_unlock_irqrestore(&device->mem_lock, flags); 1251 spin_unlock_irqrestore(&device->mem_lock, flags);
1288 if (cqr == NULL) 1252 if (!chunk)
1289 return ERR_PTR(-ENOMEM); 1253 return ERR_PTR(-ENOMEM);
1290 memset(cqr, 0, sizeof(struct dasd_ccw_req)); 1254 if (!cqr) {
1291 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); 1255 cqr = (void *) data;
1292 cqr->cpaddr = NULL; 1256 data += (sizeof(*cqr) + 7L) & -8L;
1257 }
1258 memset(cqr, 0, sizeof(*cqr));
1259 cqr->mem_chunk = chunk;
1293 if (cplength > 0) { 1260 if (cplength > 0) {
1294 cqr->cpaddr = (struct ccw1 *) data; 1261 cqr->cpaddr = data;
1295 data += cplength*sizeof(struct ccw1); 1262 data += cplength * sizeof(struct ccw1);
1296 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); 1263 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1297 } 1264 }
1298 cqr->data = NULL;
1299 if (datasize > 0) { 1265 if (datasize > 0) {
1300 cqr->data = data; 1266 cqr->data = data;
1301 memset(cqr->data, 0, datasize); 1267 memset(cqr->data, 0, datasize);
@@ -1307,33 +1273,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
1307} 1273}
1308EXPORT_SYMBOL(dasd_smalloc_request); 1274EXPORT_SYMBOL(dasd_smalloc_request);
1309 1275
1310/*
1311 * Free memory of a channel program. This function needs to free all the
1312 * idal lists that might have been created by dasd_set_cda and the
1313 * struct dasd_ccw_req itself.
1314 */
1315void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1316{
1317 struct ccw1 *ccw;
1318
1319 /* Clear any idals used for the request. */
1320 ccw = cqr->cpaddr;
1321 do {
1322 clear_normalized_cda(ccw);
1323 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
1324 kfree(cqr->cpaddr);
1325 kfree(cqr->data);
1326 kfree(cqr);
1327 dasd_put_device(device);
1328}
1329EXPORT_SYMBOL(dasd_kfree_request);
1330
1331void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1276void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1332{ 1277{
1333 unsigned long flags; 1278 unsigned long flags;
1334 1279
1335 spin_lock_irqsave(&device->mem_lock, flags); 1280 spin_lock_irqsave(&device->mem_lock, flags);
1336 dasd_free_chunk(&device->ccw_chunks, cqr); 1281 dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
1337 spin_unlock_irqrestore(&device->mem_lock, flags); 1282 spin_unlock_irqrestore(&device->mem_lock, flags);
1338 dasd_put_device(device); 1283 dasd_put_device(device);
1339} 1284}
@@ -1885,6 +1830,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1885 } 1830 }
1886} 1831}
1887 1832
1833static void __dasd_process_cqr(struct dasd_device *device,
1834 struct dasd_ccw_req *cqr)
1835{
1836 char errorstring[ERRORLENGTH];
1837
1838 switch (cqr->status) {
1839 case DASD_CQR_SUCCESS:
1840 cqr->status = DASD_CQR_DONE;
1841 break;
1842 case DASD_CQR_ERROR:
1843 cqr->status = DASD_CQR_NEED_ERP;
1844 break;
1845 case DASD_CQR_CLEARED:
1846 cqr->status = DASD_CQR_TERMINATED;
1847 break;
1848 default:
1849 /* internal error 12 - wrong cqr status*/
1850 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
1851 dev_err(&device->cdev->dev,
1852 "An error occurred in the DASD device driver, "
1853 "reason=%s\n", errorstring);
1854 BUG();
1855 }
1856 if (cqr->callback)
1857 cqr->callback(cqr, cqr->callback_data);
1858}
1859
1888/* 1860/*
1889 * the cqrs from the final queue are returned to the upper layer 1861 * the cqrs from the final queue are returned to the upper layer
1890 * by setting a dasd_block state and calling the callback function 1862 * by setting a dasd_block state and calling the callback function
@@ -1895,40 +1867,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
1895 struct list_head *l, *n; 1867 struct list_head *l, *n;
1896 struct dasd_ccw_req *cqr; 1868 struct dasd_ccw_req *cqr;
1897 struct dasd_block *block; 1869 struct dasd_block *block;
1898 void (*callback)(struct dasd_ccw_req *, void *data);
1899 void *callback_data;
1900 char errorstring[ERRORLENGTH];
1901 1870
1902 list_for_each_safe(l, n, final_queue) { 1871 list_for_each_safe(l, n, final_queue) {
1903 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1872 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1904 list_del_init(&cqr->devlist); 1873 list_del_init(&cqr->devlist);
1905 block = cqr->block; 1874 block = cqr->block;
1906 callback = cqr->callback; 1875 if (!block) {
1907 callback_data = cqr->callback_data; 1876 __dasd_process_cqr(device, cqr);
1908 if (block) 1877 } else {
1909 spin_lock_bh(&block->queue_lock); 1878 spin_lock_bh(&block->queue_lock);
1910 switch (cqr->status) { 1879 __dasd_process_cqr(device, cqr);
1911 case DASD_CQR_SUCCESS:
1912 cqr->status = DASD_CQR_DONE;
1913 break;
1914 case DASD_CQR_ERROR:
1915 cqr->status = DASD_CQR_NEED_ERP;
1916 break;
1917 case DASD_CQR_CLEARED:
1918 cqr->status = DASD_CQR_TERMINATED;
1919 break;
1920 default:
1921 /* internal error 12 - wrong cqr status*/
1922 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
1923 dev_err(&device->cdev->dev,
1924 "An error occurred in the DASD device driver, "
1925 "reason=%s\n", errorstring);
1926 BUG();
1927 }
1928 if (cqr->callback != NULL)
1929 (callback)(cqr, callback_data);
1930 if (block)
1931 spin_unlock_bh(&block->queue_lock); 1880 spin_unlock_bh(&block->queue_lock);
1881 }
1932 } 1882 }
1933} 1883}
1934 1884
@@ -3041,7 +2991,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
3041 cqr->callback_data = req; 2991 cqr->callback_data = req;
3042 cqr->status = DASD_CQR_FILLED; 2992 cqr->status = DASD_CQR_FILLED;
3043 cqr->dq = dq; 2993 cqr->dq = dq;
3044 *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
3045 2994
3046 blk_mq_start_request(req); 2995 blk_mq_start_request(req);
3047 spin_lock(&block->queue_lock); 2996 spin_lock(&block->queue_lock);
@@ -3072,7 +3021,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
3072 unsigned long flags; 3021 unsigned long flags;
3073 int rc = 0; 3022 int rc = 0;
3074 3023
3075 cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)); 3024 cqr = blk_mq_rq_to_pdu(req);
3076 if (!cqr) 3025 if (!cqr)
3077 return BLK_EH_DONE; 3026 return BLK_EH_DONE;
3078 3027
@@ -3174,9 +3123,9 @@ static int dasd_alloc_queue(struct dasd_block *block)
3174 int rc; 3123 int rc;
3175 3124
3176 block->tag_set.ops = &dasd_mq_ops; 3125 block->tag_set.ops = &dasd_mq_ops;
3177 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *); 3126 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
3178 block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; 3127 block->tag_set.nr_hw_queues = nr_hw_queues;
3179 block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; 3128 block->tag_set.queue_depth = queue_depth;
3180 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3129 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
3181 3130
3182 rc = blk_mq_alloc_tag_set(&block->tag_set); 3131 rc = blk_mq_alloc_tag_set(&block->tag_set);
@@ -4038,7 +3987,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
4038 struct ccw1 *ccw; 3987 struct ccw1 *ccw;
4039 unsigned long *idaw; 3988 unsigned long *idaw;
4040 3989
4041 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); 3990 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
3991 NULL);
4042 3992
4043 if (IS_ERR(cqr)) { 3993 if (IS_ERR(cqr)) {
4044 /* internal error 13 - Allocating the RDC request failed*/ 3994 /* internal error 13 - Allocating the RDC request failed*/
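
The hunks above change the allocation API: dasd_kmalloc_request() is gone and dasd_smalloc_request() gains a fifth parameter through which callers can hand in a preallocated struct dasd_ccw_req. A minimal sketch of the two calling conventions, assuming the usual driver context (both calls mirror ones that appear verbatim in this diff):

	/* Driver-internal request: pass NULL and let the allocator carve
	 * the cqr out of the device's ccw chunk, in front of the cpaddr
	 * and data areas. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/* Block-layer request: reuse the cqr embedded in the blk-mq
	 * request PDU, so only cpaddr/data come from the chunk. */
	cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize,
				   memdev, blk_mq_rq_to_pdu(req));

Either way the backing chunk is recorded in cqr->mem_chunk, which is what dasd_sfree_request() now hands back to dasd_free_chunk().
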
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 5e963fe0e38d..e36a114354fc 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device,
407 int rc; 407 int rc;
408 unsigned long flags; 408 unsigned long flags;
409 409
410 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 410 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
411 (sizeof(struct dasd_psf_prssd_data)), 411 (sizeof(struct dasd_psf_prssd_data)),
412 device); 412 device, NULL);
413 if (IS_ERR(cqr)) 413 if (IS_ERR(cqr))
414 return PTR_ERR(cqr); 414 return PTR_ERR(cqr);
415 cqr->startdev = device; 415 cqr->startdev = device;
@@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
457 lcu->flags |= NEED_UAC_UPDATE; 457 lcu->flags |= NEED_UAC_UPDATE;
458 spin_unlock_irqrestore(&lcu->lock, flags); 458 spin_unlock_irqrestore(&lcu->lock, flags);
459 } 459 }
460 dasd_kfree_request(cqr, cqr->memdev); 460 dasd_sfree_request(cqr, cqr->memdev);
461 return rc; 461 return rc;
462} 462}
463 463
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 131f1989f6f3..e1fe02477ea8 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
536 /* Build the request */ 536 /* Build the request */
537 datasize = sizeof(struct dasd_diag_req) + 537 datasize = sizeof(struct dasd_diag_req) +
538 count*sizeof(struct dasd_diag_bio); 538 count*sizeof(struct dasd_diag_bio);
539 cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev); 539 cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
540 blk_mq_rq_to_pdu(req));
540 if (IS_ERR(cqr)) 541 if (IS_ERR(cqr))
541 return cqr; 542 return cqr;
542 543
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index be208e7adcb4..bbf95b78ef5d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
886 } 886 }
887 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, 887 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
888 0, /* use rcd_buf as data area */ 888 0, /* use rcd_buf as data area */
889 device); 889 device, NULL);
890 if (IS_ERR(cqr)) { 890 if (IS_ERR(cqr)) {
891 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 891 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
892 "Could not allocate RCD request"); 892 "Could not allocate RCD request");
@@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
1442 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 1442 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
1443 (sizeof(struct dasd_psf_prssd_data) + 1443 (sizeof(struct dasd_psf_prssd_data) +
1444 sizeof(struct dasd_rssd_features)), 1444 sizeof(struct dasd_rssd_features)),
1445 device); 1445 device, NULL);
1446 if (IS_ERR(cqr)) { 1446 if (IS_ERR(cqr)) {
1447 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not " 1447 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
1448 "allocate initialization request"); 1448 "allocate initialization request");
@@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1504 1504
1505 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , 1505 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1506 sizeof(struct dasd_psf_ssc_data), 1506 sizeof(struct dasd_psf_ssc_data),
1507 device); 1507 device, NULL);
1508 1508
1509 if (IS_ERR(cqr)) { 1509 if (IS_ERR(cqr)) {
1510 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1510 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -1815,7 +1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
1815 1815
1816 cplength = 8; 1816 cplength = 8;
1817 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); 1817 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
1818 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device); 1818 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
1819 NULL);
1819 if (IS_ERR(cqr)) 1820 if (IS_ERR(cqr))
1820 return cqr; 1821 return cqr;
1821 ccw = cqr->cpaddr; 1822 ccw = cqr->cpaddr;
@@ -2092,7 +2093,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2092 */ 2093 */
2093 itcw_size = itcw_calc_size(0, count, 0); 2094 itcw_size = itcw_calc_size(0, count, 0);
2094 2095
2095 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); 2096 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
2097 NULL);
2096 if (IS_ERR(cqr)) 2098 if (IS_ERR(cqr))
2097 return cqr; 2099 return cqr;
2098 2100
@@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2186 cplength += count; 2188 cplength += count;
2187 2189
2188 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 2190 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
2189 startdev); 2191 startdev, NULL);
2190 if (IS_ERR(cqr)) 2192 if (IS_ERR(cqr))
2191 return cqr; 2193 return cqr;
2192 2194
@@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base,
2332 } 2334 }
2333 /* Allocate the format ccw request. */ 2335 /* Allocate the format ccw request. */
2334 fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 2336 fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
2335 datasize, startdev); 2337 datasize, startdev, NULL);
2336 if (IS_ERR(fcp)) 2338 if (IS_ERR(fcp))
2337 return fcp; 2339 return fcp;
2338 2340
@@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
3103 } 3105 }
3104 /* Allocate the ccw request. */ 3106 /* Allocate the ccw request. */
3105 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 3107 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3106 startdev); 3108 startdev, blk_mq_rq_to_pdu(req));
3107 if (IS_ERR(cqr)) 3109 if (IS_ERR(cqr))
3108 return cqr; 3110 return cqr;
3109 ccw = cqr->cpaddr; 3111 ccw = cqr->cpaddr;
@@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
3262 3264
3263 /* Allocate the ccw request. */ 3265 /* Allocate the ccw request. */
3264 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 3266 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3265 startdev); 3267 startdev, blk_mq_rq_to_pdu(req));
3266 if (IS_ERR(cqr)) 3268 if (IS_ERR(cqr))
3267 return cqr; 3269 return cqr;
3268 ccw = cqr->cpaddr; 3270 ccw = cqr->cpaddr;
@@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3595 3597
3596 /* Allocate the ccw request. */ 3598 /* Allocate the ccw request. */
3597 itcw_size = itcw_calc_size(0, ctidaw, 0); 3599 itcw_size = itcw_calc_size(0, ctidaw, 0);
3598 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); 3600 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
3601 blk_mq_rq_to_pdu(req));
3599 if (IS_ERR(cqr)) 3602 if (IS_ERR(cqr))
3600 return cqr; 3603 return cqr;
3601 3604
@@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
3862 3865
3863 /* Allocate the ccw request. */ 3866 /* Allocate the ccw request. */
3864 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 3867 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
3865 datasize, startdev); 3868 datasize, startdev, blk_mq_rq_to_pdu(req));
3866 if (IS_ERR(cqr)) 3869 if (IS_ERR(cqr))
3867 return cqr; 3870 return cqr;
3868 3871
@@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device)
4102 return -EACCES; 4105 return -EACCES;
4103 4106
4104 useglobal = 0; 4107 useglobal = 0;
4105 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); 4108 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4106 if (IS_ERR(cqr)) { 4109 if (IS_ERR(cqr)) {
4107 mutex_lock(&dasd_reserve_mutex); 4110 mutex_lock(&dasd_reserve_mutex);
4108 useglobal = 1; 4111 useglobal = 1;
@@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device)
4157 return -EACCES; 4160 return -EACCES;
4158 4161
4159 useglobal = 0; 4162 useglobal = 0;
4160 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); 4163 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4161 if (IS_ERR(cqr)) { 4164 if (IS_ERR(cqr)) {
4162 mutex_lock(&dasd_reserve_mutex); 4165 mutex_lock(&dasd_reserve_mutex);
4163 useglobal = 1; 4166 useglobal = 1;
@@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
4211 return -EACCES; 4214 return -EACCES;
4212 4215
4213 useglobal = 0; 4216 useglobal = 0;
4214 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device); 4217 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4215 if (IS_ERR(cqr)) { 4218 if (IS_ERR(cqr)) {
4216 mutex_lock(&dasd_reserve_mutex); 4219 mutex_lock(&dasd_reserve_mutex);
4217 useglobal = 1; 4220 useglobal = 1;
@@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device,
4271 4274
4272 useglobal = 0; 4275 useglobal = 0;
4273 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 4276 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
4274 sizeof(struct dasd_snid_data), device); 4277 sizeof(struct dasd_snid_data), device,
4278 NULL);
4275 if (IS_ERR(cqr)) { 4279 if (IS_ERR(cqr)) {
4276 mutex_lock(&dasd_reserve_mutex); 4280 mutex_lock(&dasd_reserve_mutex);
4277 useglobal = 1; 4281 useglobal = 1;
@@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
4331 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 4335 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
4332 (sizeof(struct dasd_psf_prssd_data) + 4336 (sizeof(struct dasd_psf_prssd_data) +
4333 sizeof(struct dasd_rssd_perf_stats_t)), 4337 sizeof(struct dasd_rssd_perf_stats_t)),
4334 device); 4338 device, NULL);
4335 if (IS_ERR(cqr)) { 4339 if (IS_ERR(cqr)) {
4336 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 4340 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
4337 "Could not allocate initialization request"); 4341 "Could not allocate initialization request");
@@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
4477 psf1 = psf_data[1]; 4481 psf1 = psf_data[1];
4478 4482
4479 /* setup CCWs for PSF + RSSD */ 4483 /* setup CCWs for PSF + RSSD */
4480 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device); 4484 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
4481 if (IS_ERR(cqr)) { 4485 if (IS_ERR(cqr)) {
4482 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 4486 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
4483 "Could not allocate initialization request"); 4487 "Could not allocate initialization request");
@@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
5037 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 5041 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5038 (sizeof(struct dasd_psf_prssd_data) + 5042 (sizeof(struct dasd_psf_prssd_data) +
5039 sizeof(struct dasd_rssd_messages)), 5043 sizeof(struct dasd_rssd_messages)),
5040 device); 5044 device, NULL);
5041 if (IS_ERR(cqr)) { 5045 if (IS_ERR(cqr)) {
5042 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 5046 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5043 "Could not allocate read message buffer request"); 5047 "Could not allocate read message buffer request");
@@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
5126 5130
5127 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 5131 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
5128 sizeof(struct dasd_psf_prssd_data) + 1, 5132 sizeof(struct dasd_psf_prssd_data) + 1,
5129 device); 5133 device, NULL);
5130 if (IS_ERR(cqr)) { 5134 if (IS_ERR(cqr)) {
5131 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 5135 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5132 "Could not allocate read message buffer request"); 5136 "Could not allocate read message buffer request");
@@ -5284,8 +5288,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
5284 int rc; 5288 int rc;
5285 5289
5286 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ , 5290 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
5287 sizeof(struct dasd_psf_cuir_response), 5291 sizeof(struct dasd_psf_cuir_response),
5288 device); 5292 device, NULL);
5289 5293
5290 if (IS_ERR(cqr)) { 5294 if (IS_ERR(cqr)) {
5291 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5295 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 0af8c5295b65..6ef8714dc693 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
447 * is a new ccw in device->eer_cqr. Free the "old" 447 * is a new ccw in device->eer_cqr. Free the "old"
448 * snss request now. 448 * snss request now.
449 */ 449 */
450 dasd_kfree_request(cqr, device); 450 dasd_sfree_request(cqr, device);
451} 451}
452 452
453/* 453/*
@@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device)
472 if (rc) 472 if (rc)
473 goto out; 473 goto out;
474 474
475 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */, 475 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
476 SNSS_DATA_SIZE, device); 476 SNSS_DATA_SIZE, device, NULL);
477 if (IS_ERR(cqr)) { 477 if (IS_ERR(cqr)) {
478 rc = -ENOMEM; 478 rc = -ENOMEM;
479 cqr = NULL; 479 cqr = NULL;
@@ -505,7 +505,7 @@ out:
505 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 505 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
506 506
507 if (cqr) 507 if (cqr)
508 dasd_kfree_request(cqr, device); 508 dasd_sfree_request(cqr, device);
509 509
510 return rc; 510 return rc;
511} 511}
@@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device)
528 in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); 528 in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
529 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 529 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
530 if (cqr && !in_use) 530 if (cqr && !in_use)
531 dasd_kfree_request(cqr, device); 531 dasd_sfree_request(cqr, device);
532} 532}
533 533
534/* 534/*
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a6b132f7e869..56007a3e7f11 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard(
356 datasize = sizeof(struct DE_fba_data) + 356 datasize = sizeof(struct DE_fba_data) +
357 nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1)); 357 nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
358 358
359 cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); 359 cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
360 blk_mq_rq_to_pdu(req));
360 if (IS_ERR(cqr)) 361 if (IS_ERR(cqr))
361 return cqr; 362 return cqr;
362 363
@@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
490 datasize += (count - 1)*sizeof(struct LO_fba_data); 491 datasize += (count - 1)*sizeof(struct LO_fba_data);
491 } 492 }
492 /* Allocate the ccw request. */ 493 /* Allocate the ccw request. */
493 cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev); 494 cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
495 blk_mq_rq_to_pdu(req));
494 if (IS_ERR(cqr)) 496 if (IS_ERR(cqr))
495 return cqr; 497 return cqr;
496 ccw = cqr->cpaddr; 498 ccw = cqr->cpaddr;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 96709b1a7bf8..de6b96036aa4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -158,40 +158,33 @@ do { \
158 158
159struct dasd_ccw_req { 159struct dasd_ccw_req {
160 unsigned int magic; /* Eye catcher */ 160 unsigned int magic; /* Eye catcher */
161 int intrc; /* internal error, e.g. from start_IO */
161 struct list_head devlist; /* for dasd_device request queue */ 162 struct list_head devlist; /* for dasd_device request queue */
162 struct list_head blocklist; /* for dasd_block request queue */ 163 struct list_head blocklist; /* for dasd_block request queue */
163
164 /* Where to execute what... */
165 struct dasd_block *block; /* the originating block device */ 164 struct dasd_block *block; /* the originating block device */
166 struct dasd_device *memdev; /* the device used to allocate this */ 165 struct dasd_device *memdev; /* the device used to allocate this */
167 struct dasd_device *startdev; /* device the request is started on */ 166 struct dasd_device *startdev; /* device the request is started on */
168 struct dasd_device *basedev; /* base device if no block->base */ 167 struct dasd_device *basedev; /* base device if no block->base */
169 void *cpaddr; /* address of ccw or tcw */ 168 void *cpaddr; /* address of ccw or tcw */
169 short retries; /* A retry counter */
170 unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */ 170 unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */
171 char status; /* status of this request */ 171 char status; /* status of this request */
172 short retries; /* A retry counter */ 172 char lpm; /* logical path mask */
173 unsigned long flags; /* flags of this request */ 173 unsigned long flags; /* flags of this request */
174 struct dasd_queue *dq; 174 struct dasd_queue *dq;
175
176 /* ... and how */
177 unsigned long starttime; /* jiffies time of request start */ 175 unsigned long starttime; /* jiffies time of request start */
178 unsigned long expires; /* expiration period in jiffies */ 176 unsigned long expires; /* expiration period in jiffies */
179 char lpm; /* logical path mask */
180 void *data; /* pointer to data area */ 177 void *data; /* pointer to data area */
181
182 /* these are important for recovering erroneous requests */
183 int intrc; /* internal error, e.g. from start_IO */
184 struct irb irb; /* device status in case of an error */ 178 struct irb irb; /* device status in case of an error */
185 struct dasd_ccw_req *refers; /* ERP-chain queueing. */ 179 struct dasd_ccw_req *refers; /* ERP-chain queueing. */
186 void *function; /* originating ERP action */ 180 void *function; /* originating ERP action */
181 void *mem_chunk;
187 182
188 /* these are for statistics only */
189 unsigned long buildclk; /* TOD-clock of request generation */ 183 unsigned long buildclk; /* TOD-clock of request generation */
190 unsigned long startclk; /* TOD-clock of request start */ 184 unsigned long startclk; /* TOD-clock of request start */
191 unsigned long stopclk; /* TOD-clock of request interrupt */ 185 unsigned long stopclk; /* TOD-clock of request interrupt */
192 unsigned long endclk; /* TOD-clock of request termination */ 186 unsigned long endclk; /* TOD-clock of request termination */
193 187
194 /* Callback that is called after reaching final status. */
195 void (*callback)(struct dasd_ccw_req *, void *data); 188 void (*callback)(struct dasd_ccw_req *, void *data);
196 void *callback_data; 189 void *callback_data;
197}; 190};
@@ -235,14 +228,6 @@ struct dasd_ccw_req {
235#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ 228#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
236#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ 229#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */
237 230
238/*
239 * There is no reliable way to determine the number of available CPUs on
240 * LPAR but there is no big performance difference between 1 and the
241 * maximum CPU number.
242 * 64 is a good trade off performance wise.
243 */
244#define DASD_NR_HW_QUEUES 64
245#define DASD_MAX_LCU_DEV 256
246#define DASD_REQ_PER_DEV 4 231#define DASD_REQ_PER_DEV 4
247 232
248/* Signature for error recovery functions. */ 233/* Signature for error recovery functions. */
@@ -714,19 +699,10 @@ extern const struct block_device_operations dasd_device_operations;
714extern struct kmem_cache *dasd_page_cache; 699extern struct kmem_cache *dasd_page_cache;
715 700
716struct dasd_ccw_req * 701struct dasd_ccw_req *
717dasd_kmalloc_request(int , int, int, struct dasd_device *); 702dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
718struct dasd_ccw_req *
719dasd_smalloc_request(int , int, int, struct dasd_device *);
720void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
721void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); 703void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
722void dasd_wakeup_cb(struct dasd_ccw_req *, void *); 704void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
723 705
724static inline int
725dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
726{
727 return set_normalized_cda(ccw, cda);
728}
729
730struct dasd_device *dasd_alloc_device(void); 706struct dasd_device *dasd_alloc_device(void);
731void dasd_free_device(struct dasd_device *); 707void dasd_free_device(struct dasd_device *);
732 708
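
Together with the dasd.c hunk that changes tag_set.cmd_size from sizeof(struct dasd_ccw_req *) to sizeof(struct dasd_ccw_req), the struct reshuffle above means the blk-mq PDU now is the request rather than a pointer to it. The lookup and teardown paths, lifted from the hunks in this diff, reduce to roughly:

	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
	/* ... */
	cqr = blk_mq_rq_to_pdu(req);	/* the PDU is the cqr itself */
	/* ... */
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);

nr_hw_queues and queue_depth are now taken from variables in place of the removed DASD_NR_HW_QUEUES and DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV constants.
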
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index a070ef0efe65..f230516abb96 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -5,6 +5,7 @@
5 5
6# The following is required for define_trace.h to find ./trace.h 6# The following is required for define_trace.h to find ./trace.h
7CFLAGS_trace.o := -I$(src) 7CFLAGS_trace.o := -I$(src)
8CFLAGS_vfio_ccw_fsm.o := -I$(src)
8 9
9obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ 10obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
10 fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o 11 fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index dce92b2a895d..dbe7c7ac9ac8 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -23,9 +23,13 @@
23#define CCWCHAIN_LEN_MAX 256 23#define CCWCHAIN_LEN_MAX 256
24 24
25struct pfn_array { 25struct pfn_array {
26 /* Starting guest physical I/O address. */
26 unsigned long pa_iova; 27 unsigned long pa_iova;
28 /* Array that stores PFNs of the pages that need to be pinned. */
27 unsigned long *pa_iova_pfn; 29 unsigned long *pa_iova_pfn;
30 /* Array that receives PFNs of the pages pinned. */
28 unsigned long *pa_pfn; 31 unsigned long *pa_pfn;
32 /* Number of pages pinned from @pa_iova. */
29 int pa_nr; 33 int pa_nr;
30}; 34};
31 35
@@ -46,70 +50,33 @@ struct ccwchain {
46}; 50};
47 51
48/* 52/*
49 * pfn_array_pin() - pin user pages in memory 53 * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
50 * @pa: pfn_array on which to perform the operation 54 * @pa: pfn_array on which to perform the operation
51 * @mdev: the mediated device to perform pin/unpin operations 55 * @mdev: the mediated device to perform pin/unpin operations
56 * @iova: target guest physical address
57 * @len: number of bytes that should be pinned from @iova
52 * 58 *
53 * Attempt to pin user pages in memory. 59 * Attempt to allocate memory for PFNs, and pin user pages in memory.
54 * 60 *
55 * Usage of pfn_array: 61 * Usage of pfn_array:
56 * @pa->pa_iova starting guest physical I/O address. Assigned by caller. 62 * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); all fields in
57 * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated 63 * this structure will be filled in by this function.
58 * by caller.
59 * @pa->pa_pfn array that receives PFNs of the pages pinned. Allocated by
60 * caller.
61 * @pa->pa_nr number of pages from @pa->pa_iova to pin. Assigned by
62 * caller.
63 * number of pages pinned. Assigned by callee.
64 * 64 *
65 * Returns: 65 * Returns:
66 * Number of pages pinned on success. 66 * Number of pages pinned on success.
67 * If @pa->pa_nr is 0 or negative, returns 0. 67 * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
68 * returns -EINVAL.
68 * If no pages were pinned, returns -errno. 69 * If no pages were pinned, returns -errno.
69 */ 70 */
70static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
71{
72 int i, ret;
73
74 if (pa->pa_nr <= 0) {
75 pa->pa_nr = 0;
76 return 0;
77 }
78
79 pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
80 for (i = 1; i < pa->pa_nr; i++)
81 pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
82
83 ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
84 IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
85
86 if (ret > 0 && ret != pa->pa_nr) {
87 vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
88 pa->pa_nr = 0;
89 return 0;
90 }
91
92 return ret;
93}
94
95/* Unpin the pages before releasing the memory. */
96static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
97{
98 vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
99 pa->pa_nr = 0;
100 kfree(pa->pa_iova_pfn);
101}
102
103/* Alloc memory for PFNs, then pin pages with them. */
104static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, 71static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
105 u64 iova, unsigned int len) 72 u64 iova, unsigned int len)
106{ 73{
107 int ret = 0; 74 int i, ret = 0;
108 75
109 if (!len) 76 if (!len)
110 return 0; 77 return 0;
111 78
112 if (pa->pa_nr) 79 if (pa->pa_nr || pa->pa_iova_pfn)
113 return -EINVAL; 80 return -EINVAL;
114 81
115 pa->pa_iova = iova; 82 pa->pa_iova = iova;
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
126 return -ENOMEM; 93 return -ENOMEM;
127 pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; 94 pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
128 95
129 ret = pfn_array_pin(pa, mdev); 96 pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
97 for (i = 1; i < pa->pa_nr; i++)
98 pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
130 99
131 if (ret > 0) 100 ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
132 return ret; 101 IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
133 else if (!ret) 102
103 if (ret < 0) {
104 goto err_out;
105 } else if (ret > 0 && ret != pa->pa_nr) {
106 vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
134 ret = -EINVAL; 107 ret = -EINVAL;
108 goto err_out;
109 }
135 110
111 return ret;
112
113err_out:
114 pa->pa_nr = 0;
136 kfree(pa->pa_iova_pfn); 115 kfree(pa->pa_iova_pfn);
116 pa->pa_iova_pfn = NULL;
137 117
138 return ret; 118 return ret;
139} 119}
140 120
121/* Unpin the pages before releasing the memory. */
122static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
123{
124 vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
125 pa->pa_nr = 0;
126 kfree(pa->pa_iova_pfn);
127}
128
141static int pfn_array_table_init(struct pfn_array_table *pat, int nr) 129static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
142{ 130{
143 pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL); 131 pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp)
365 * This is the chain length not considering any TICs. 353 * This is the chain length not considering any TICs.
366 * You need to do a new round for each TIC target. 354 * You need to do a new round for each TIC target.
367 * 355 *
356 * The program is also validated for absence of not yet supported
357 * indirect data addressing scenarios.
358 *
368 * Returns: the length of the ccw chain or -errno. 359 * Returns: the length of the ccw chain or -errno.
369 */ 360 */
370static int ccwchain_calc_length(u64 iova, struct channel_program *cp) 361static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
391 do { 382 do {
392 cnt++; 383 cnt++;
393 384
385 /*
386 * As we don't want to fail direct addressing even if the
387 * orb specified one of the unsupported formats, we defer
388 * checking for IDAWs in unsupported formats to here.
389 */
390 if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
391 return -EOPNOTSUPP;
392
394 if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) 393 if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
395 break; 394 break;
396 395
@@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
503 struct ccw1 *ccw; 502 struct ccw1 *ccw;
504 struct pfn_array_table *pat; 503 struct pfn_array_table *pat;
505 unsigned long *idaws; 504 unsigned long *idaws;
506 int idaw_nr; 505 int ret;
507 506
508 ccw = chain->ch_ccw + idx; 507 ccw = chain->ch_ccw + idx;
509 508
@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
523 * needed when translating a direct ccw to a idal ccw. 522 * needed when translating a direct ccw to a idal ccw.
524 */ 523 */
525 pat = chain->ch_pat + idx; 524 pat = chain->ch_pat + idx;
526 if (pfn_array_table_init(pat, 1)) 525 ret = pfn_array_table_init(pat, 1);
527 return -ENOMEM; 526 if (ret)
528 idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, 527 goto out_init;
529 ccw->cda, ccw->count); 528
530 if (idaw_nr < 0) 529 ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
531 return idaw_nr; 530 if (ret < 0)
531 goto out_init;
532 532
533 /* Translate this direct ccw to a idal ccw. */ 533 /* Translate this direct ccw to a idal ccw. */
534 idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL); 534 idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
535 if (!idaws) { 535 if (!idaws) {
536 pfn_array_table_unpin_free(pat, cp->mdev); 536 ret = -ENOMEM;
537 return -ENOMEM; 537 goto out_unpin;
538 } 538 }
539 ccw->cda = (__u32) virt_to_phys(idaws); 539 ccw->cda = (__u32) virt_to_phys(idaws);
540 ccw->flags |= CCW_FLAG_IDA; 540 ccw->flags |= CCW_FLAG_IDA;
@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
542 pfn_array_table_idal_create_words(pat, idaws); 542 pfn_array_table_idal_create_words(pat, idaws);
543 543
544 return 0; 544 return 0;
545
546out_unpin:
547 pfn_array_table_unpin_free(pat, cp->mdev);
548out_init:
549 ccw->cda = 0;
550 return ret;
545} 551}
546 552
547static int ccwchain_fetch_idal(struct ccwchain *chain, 553static int ccwchain_fetch_idal(struct ccwchain *chain,
@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
571 pat = chain->ch_pat + idx; 577 pat = chain->ch_pat + idx;
572 ret = pfn_array_table_init(pat, idaw_nr); 578 ret = pfn_array_table_init(pat, idaw_nr);
573 if (ret) 579 if (ret)
574 return ret; 580 goto out_init;
575 581
576 /* Translate idal ccw to use new allocated idaws. */ 582 /* Translate idal ccw to use new allocated idaws. */
577 idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL); 583 idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
@@ -603,6 +609,8 @@ out_free_idaws:
603 kfree(idaws); 609 kfree(idaws);
604out_unpin: 610out_unpin:
605 pfn_array_table_unpin_free(pat, cp->mdev); 611 pfn_array_table_unpin_free(pat, cp->mdev);
612out_init:
613 ccw->cda = 0;
606 return ret; 614 return ret;
607} 615}
608 616
@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
656 /* 664 /*
657 * XXX: 665 * XXX:
658 * Only support prefetch enable mode now. 666 * Only support prefetch enable mode now.
659 * Only support 64bit addressing idal.
660 * Only support 4k IDAW.
661 */ 667 */
662 if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k) 668 if (!orb->cmd.pfch)
663 return -EOPNOTSUPP; 669 return -EOPNOTSUPP;
664 670
665 INIT_LIST_HEAD(&cp->ccwchain_list); 671 INIT_LIST_HEAD(&cp->ccwchain_list);
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
688 ret = ccwchain_loop_tic(chain, cp); 694 ret = ccwchain_loop_tic(chain, cp);
689 if (ret) 695 if (ret)
690 cp_unpin_free(cp); 696 cp_unpin_free(cp);
697 /* It is safe to force: if c64 is not set but IDALs are used,
698 * ccwchain_calc_length returns an error.
699 */
700 cp->orb.cmd.c64 = 1;
691 701
692 return ret; 702 return ret;
693} 703}
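
pfn_array_pin() is folded into pfn_array_alloc_pin(), which now owns the whole allocate-then-pin sequence and unwinds itself on failure. A hedged sketch of the resulting contract (mdev, iova and len are placeholder values):

	struct pfn_array pa = { 0 };	/* pa_nr == 0, pa_iova_pfn == NULL */
	int nr;

	nr = pfn_array_alloc_pin(&pa, mdev, iova, len);
	if (nr < 0)
		return nr;	/* pa_iova_pfn already freed and reset */

	/* ... use pa.pa_pfn[0 .. nr - 1] ... */

	pfn_array_unpin_free(&pa, mdev);

A return of 0 only happens for len == 0; a partial pin is unpinned internally and reported as -EINVAL, so callers never see a short count.
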
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index ea6a2d0b2894..770fa9cfc310 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
177{ 177{
178 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); 178 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
179 unsigned long flags; 179 unsigned long flags;
180 int rc = -EAGAIN;
180 181
181 spin_lock_irqsave(sch->lock, flags); 182 spin_lock_irqsave(sch->lock, flags);
182 if (!device_is_registered(&sch->dev)) 183 if (!device_is_registered(&sch->dev))
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
187 188
188 if (cio_update_schib(sch)) { 189 if (cio_update_schib(sch)) {
189 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); 190 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
191 rc = 0;
190 goto out_unlock; 192 goto out_unlock;
191 } 193 }
192 194
@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
195 private->state = private->mdev ? VFIO_CCW_STATE_IDLE : 197 private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
196 VFIO_CCW_STATE_STANDBY; 198 VFIO_CCW_STATE_STANDBY;
197 } 199 }
200 rc = 0;
198 201
199out_unlock: 202out_unlock:
200 spin_unlock_irqrestore(sch->lock, flags); 203 spin_unlock_irqrestore(sch->lock, flags);
201 204
202 return 0; 205 return rc;
203} 206}
204 207
205static struct css_device_id vfio_ccw_sch_ids[] = { 208static struct css_device_id vfio_ccw_sch_ids[] = {
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 3c800642134e..797a82731159 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -13,6 +13,9 @@
13#include "ioasm.h" 13#include "ioasm.h"
14#include "vfio_ccw_private.h" 14#include "vfio_ccw_private.h"
15 15
16#define CREATE_TRACE_POINTS
17#include "vfio_ccw_trace.h"
18
16static int fsm_io_helper(struct vfio_ccw_private *private) 19static int fsm_io_helper(struct vfio_ccw_private *private)
17{ 20{
18 struct subchannel *sch; 21 struct subchannel *sch;
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private,
110 */ 113 */
111 cio_disable_subchannel(sch); 114 cio_disable_subchannel(sch);
112} 115}
116inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
117{
118 return p->sch->schid;
119}
113 120
114/* 121/*
115 * Deal with the ccw command request from the userspace. 122 * Deal with the ccw command request from the userspace.
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
121 union scsw *scsw = &private->scsw; 128 union scsw *scsw = &private->scsw;
122 struct ccw_io_region *io_region = &private->io_region; 129 struct ccw_io_region *io_region = &private->io_region;
123 struct mdev_device *mdev = private->mdev; 130 struct mdev_device *mdev = private->mdev;
131 char *errstr = "request";
124 132
125 private->state = VFIO_CCW_STATE_BOXED; 133 private->state = VFIO_CCW_STATE_BOXED;
126 134
@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private,
132 /* Don't try to build a cp if transport mode is specified. */ 140 /* Don't try to build a cp if transport mode is specified. */
133 if (orb->tm.b) { 141 if (orb->tm.b) {
134 io_region->ret_code = -EOPNOTSUPP; 142 io_region->ret_code = -EOPNOTSUPP;
143 errstr = "transport mode";
135 goto err_out; 144 goto err_out;
136 } 145 }
137 io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), 146 io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
138 orb); 147 orb);
139 if (io_region->ret_code) 148 if (io_region->ret_code) {
149 errstr = "cp init";
140 goto err_out; 150 goto err_out;
151 }
141 152
142 io_region->ret_code = cp_prefetch(&private->cp); 153 io_region->ret_code = cp_prefetch(&private->cp);
143 if (io_region->ret_code) { 154 if (io_region->ret_code) {
155 errstr = "cp prefetch";
144 cp_free(&private->cp); 156 cp_free(&private->cp);
145 goto err_out; 157 goto err_out;
146 } 158 }
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
148 /* Start channel program and wait for I/O interrupt. */ 160 /* Start channel program and wait for I/O interrupt. */
149 io_region->ret_code = fsm_io_helper(private); 161 io_region->ret_code = fsm_io_helper(private);
150 if (io_region->ret_code) { 162 if (io_region->ret_code) {
163 errstr = "cp fsm_io_helper";
151 cp_free(&private->cp); 164 cp_free(&private->cp);
152 goto err_out; 165 goto err_out;
153 } 166 }
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
164 177
165err_out: 178err_out:
166 private->state = VFIO_CCW_STATE_IDLE; 179 private->state = VFIO_CCW_STATE_IDLE;
180 trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
181 io_region->ret_code, errstr);
167} 182}
168 183
169/* 184/*
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644
index 000000000000..b1da53ddec1f
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -0,0 +1,54 @@
1/* SPDX-License-Identifier: GPL-2.0
2 * Tracepoints for vfio_ccw driver
3 *
4 * Copyright IBM Corp. 2018
5 *
6 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
7 * Halil Pasic <pasic@linux.vnet.ibm.com>
8 */
9
10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM vfio_ccw
12
13#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
14#define _VFIO_CCW_TRACE_
15
16#include <linux/tracepoint.h>
17
18TRACE_EVENT(vfio_ccw_io_fctl,
19 TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
20 TP_ARGS(fctl, schid, errno, errstr),
21
22 TP_STRUCT__entry(
23 __field(int, fctl)
24 __field_struct(struct subchannel_id, schid)
25 __field(int, errno)
26 __field(char*, errstr)
27 ),
28
29 TP_fast_assign(
30 __entry->fctl = fctl;
31 __entry->schid = schid;
32 __entry->errno = errno;
33 __entry->errstr = errstr;
34 ),
35
36 TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
37 __entry->schid.cssid,
38 __entry->schid.ssid,
39 __entry->schid.sch_no,
40 __entry->fctl,
41 __entry->errno,
42 __entry->errstr)
43);
44
45#endif /* _VFIO_CCW_TRACE_ */
46
47/* This part must be outside protection */
48
49#undef TRACE_INCLUDE_PATH
50#define TRACE_INCLUDE_PATH .
51#undef TRACE_INCLUDE_FILE
52#define TRACE_INCLUDE_FILE vfio_ccw_trace
53
54#include <trace/define_trace.h>
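
The new header follows the standard define_trace.h pattern: exactly one compilation unit defines CREATE_TRACE_POINTS before including it (vfio_ccw_fsm.c in the hunk above), and the Makefile hunk adds -I$(src) so that TRACE_INCLUDE_PATH "." resolves to the driver directory. The emitting side then looks like:

#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"

	/* in fsm_io_request(), on the err_out path: */
	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
			       io_region->ret_code, errstr);

Any other file that wants these events includes the header without CREATE_TRACE_POINTS and only gets the trace_vfio_ccw_io_fctl() declaration.
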
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 2a5fec55bf60..a246a618f9a4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -829,6 +829,17 @@ struct qeth_trap_id {
829/*some helper functions*/ 829/*some helper functions*/
830#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") 830#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
831 831
832static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
833 unsigned int elements)
834{
835 unsigned int i;
836
837 for (i = 0; i < elements; i++)
838 memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
839 buf->element[14].sflags = 0;
840 buf->element[15].sflags = 0;
841}
842
832/** 843/**
833 * qeth_get_elements_for_range() - find number of SBALEs to cover range. 844 * qeth_get_elements_for_range() - find number of SBALEs to cover range.
834 * @start: Start of the address range. 845 * @start: Start of the address range.
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
1029 __u16, __u16, 1040 __u16, __u16,
1030 enum qeth_prot_versions); 1041 enum qeth_prot_versions);
1031int qeth_set_features(struct net_device *, netdev_features_t); 1042int qeth_set_features(struct net_device *, netdev_features_t);
1032void qeth_recover_features(struct net_device *dev); 1043void qeth_enable_hw_features(struct net_device *dev);
1033netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); 1044netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
1034netdev_features_t qeth_features_check(struct sk_buff *skb, 1045netdev_features_t qeth_features_check(struct sk_buff *skb,
1035 struct net_device *dev, 1046 struct net_device *dev,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8e1474f1ffac..d01ac29fd986 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
73 struct qeth_qdio_out_buffer *buf, 73 struct qeth_qdio_out_buffer *buf,
74 enum iucv_tx_notify notification); 74 enum iucv_tx_notify notification);
75static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); 75static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
76static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
77 struct qeth_qdio_out_buffer *buf,
78 enum qeth_qdio_buffer_states newbufstate);
79static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); 76static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
80 77
81struct workqueue_struct *qeth_wq; 78struct workqueue_struct *qeth_wq;
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
489 struct qaob *aob; 486 struct qaob *aob;
490 struct qeth_qdio_out_buffer *buffer; 487 struct qeth_qdio_out_buffer *buffer;
491 enum iucv_tx_notify notification; 488 enum iucv_tx_notify notification;
489 unsigned int i;
492 490
493 aob = (struct qaob *) phys_to_virt(phys_aob_addr); 491 aob = (struct qaob *) phys_to_virt(phys_aob_addr);
494 QETH_CARD_TEXT(card, 5, "haob"); 492 QETH_CARD_TEXT(card, 5, "haob");
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
513 qeth_notify_skbs(buffer->q, buffer, notification); 511 qeth_notify_skbs(buffer->q, buffer, notification);
514 512
515 buffer->aob = NULL; 513 buffer->aob = NULL;
516 qeth_clear_output_buffer(buffer->q, buffer, 514 /* Free dangling allocations. The attached skbs are handled by
517 QETH_QDIO_BUF_HANDLED_DELAYED); 515 * qeth_cleanup_handled_pending().
516 */
517 for (i = 0;
518 i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
519 i++) {
520 if (aob->sba[i] && buffer->is_header[i])
521 kmem_cache_free(qeth_core_header_cache,
522 (void *) aob->sba[i]);
523 }
524 atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
518 525
519 /* from here on: do not touch buffer anymore */
520 qdio_release_aob(aob); 526 qdio_release_aob(aob);
521} 527}
522 528
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3759 QETH_CARD_TEXT(queue->card, 5, "aob"); 3765 QETH_CARD_TEXT(queue->card, 5, "aob");
3760 QETH_CARD_TEXT_(queue->card, 5, "%lx", 3766 QETH_CARD_TEXT_(queue->card, 5, "%lx",
3761 virt_to_phys(buffer->aob)); 3767 virt_to_phys(buffer->aob));
3768
3769 /* prepare the queue slot for re-use: */
3770 qeth_scrub_qdio_buffer(buffer->buffer,
3771 QETH_MAX_BUFFER_ELEMENTS(card));
3762 if (qeth_init_qdio_out_buf(queue, bidx)) { 3772 if (qeth_init_qdio_out_buf(queue, bidx)) {
3763 QETH_CARD_TEXT(card, 2, "outofbuf"); 3773 QETH_CARD_TEXT(card, 2, "outofbuf");
3764 qeth_schedule_recovery(card); 3774 qeth_schedule_recovery(card);
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
4834 goto out; 4844 goto out;
4835 } 4845 }
4836 4846
4837 ccw_device_get_id(CARD_RDEV(card), &id); 4847 ccw_device_get_id(CARD_DDEV(card), &id);
4838 request->resp_buf_len = sizeof(*response); 4848 request->resp_buf_len = sizeof(*response);
4839 request->resp_version = DIAG26C_VERSION2; 4849 request->resp_version = DIAG26C_VERSION2;
4840 request->op_code = DIAG26C_GET_MAC; 4850 request->op_code = DIAG26C_GET_MAC;
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6459#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ 6469#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
6460 NETIF_F_IPV6_CSUM) 6470 NETIF_F_IPV6_CSUM)
6461/** 6471/**
6462 * qeth_recover_features() - Restore device features after recovery 6472 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6463 * @dev: the recovering net_device 6473 * @dev: a net_device
6464 *
6465 * Caller must hold rtnl lock.
6466 */ 6474 */
6467void qeth_recover_features(struct net_device *dev) 6475void qeth_enable_hw_features(struct net_device *dev)
6468{ 6476{
6469 netdev_features_t features = dev->features;
6470 struct qeth_card *card = dev->ml_priv; 6477 struct qeth_card *card = dev->ml_priv;
6478 netdev_features_t features;
6471 6479
6480 rtnl_lock();
6481 features = dev->features;
6472 /* force-off any feature that needs an IPA sequence. 6482 /* force-off any feature that needs an IPA sequence.
6473 * netdev_update_features() will restart them. 6483 * netdev_update_features() will restart them.
6474 */ 6484 */
6475 dev->features &= ~QETH_HW_FEATURES; 6485 dev->features &= ~QETH_HW_FEATURES;
6476 netdev_update_features(dev); 6486 netdev_update_features(dev);
6477 6487 if (features != dev->features)
6478 if (features == dev->features) 6488 dev_warn(&card->gdev->dev,
6479 return; 6489 "Device recovery failed to restore all offload features\n");
6480 dev_warn(&card->gdev->dev, 6490 rtnl_unlock();
6481 "Device recovery failed to restore all offload features\n");
6482} 6491}
6483EXPORT_SYMBOL_GPL(qeth_recover_features); 6492EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6484 6493
6485int qeth_set_features(struct net_device *dev, netdev_features_t features) 6494int qeth_set_features(struct net_device *dev, netdev_features_t features)
6486{ 6495{
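
qeth_recover_features() becomes qeth_enable_hw_features() and now takes the rtnl lock itself, which is why the l2/l3 hunks below drop their rtnl_lock()/rtnl_unlock() pairs and call it unconditionally during set_online, ahead of any recovery-specific handling. Sketched from the __qeth_l2_set_online() hunk:

	qeth_set_allowed_threads(card, 0xffffffff, 0);

	qeth_enable_hw_features(card->dev);	/* takes rtnl internally */
	if (recover_flag == CARD_STATE_RECOVER) {
		/* recovery-only steps (dev_open, set_rx_mode, ...) */
	}
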
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a7cb37da6a21..2487f0aeb165 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
140 140
141static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) 141static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
142{ 142{
143 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? 143 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
144 IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; 144 IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
145 int rc; 145 int rc;
146 146
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
157 157
158static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) 158static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
159{ 159{
160 enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? 160 enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
161 IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; 161 IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
162 int rc; 162 int rc;
163 163
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
501 return -ERESTARTSYS; 501 return -ERESTARTSYS;
502 } 502 }
503 503
504 /* avoid racing against concurrent state change: */
505 if (!mutex_trylock(&card->conf_mutex))
506 return -EAGAIN;
507
504 if (!qeth_card_hw_is_reachable(card)) { 508 if (!qeth_card_hw_is_reachable(card)) {
505 ether_addr_copy(dev->dev_addr, addr->sa_data); 509 ether_addr_copy(dev->dev_addr, addr->sa_data);
506 return 0; 510 goto out_unlock;
507 } 511 }
508 512
509 /* don't register the same address twice */ 513 /* don't register the same address twice */
510 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && 514 if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
511 (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) 515 (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
512 return 0; 516 goto out_unlock;
513 517
514 /* add the new address, switch over, drop the old */ 518 /* add the new address, switch over, drop the old */
515 rc = qeth_l2_send_setmac(card, addr->sa_data); 519 rc = qeth_l2_send_setmac(card, addr->sa_data);
516 if (rc) 520 if (rc)
517 return rc; 521 goto out_unlock;
518 ether_addr_copy(old_addr, dev->dev_addr); 522 ether_addr_copy(old_addr, dev->dev_addr);
519 ether_addr_copy(dev->dev_addr, addr->sa_data); 523 ether_addr_copy(dev->dev_addr, addr->sa_data);
520 524
521 if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) 525 if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
522 qeth_l2_remove_mac(card, old_addr); 526 qeth_l2_remove_mac(card, old_addr);
523 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; 527 card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
524 return 0; 528
529out_unlock:
530 mutex_unlock(&card->conf_mutex);
531 return rc;
525} 532}
526 533
527static void qeth_promisc_to_bridge(struct qeth_card *card) 534static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1112 netif_carrier_off(card->dev); 1119 netif_carrier_off(card->dev);
1113 1120
1114 qeth_set_allowed_threads(card, 0xffffffff, 0); 1121 qeth_set_allowed_threads(card, 0xffffffff, 0);
1122
1123 qeth_enable_hw_features(card->dev);
1115 if (recover_flag == CARD_STATE_RECOVER) { 1124 if (recover_flag == CARD_STATE_RECOVER) {
1116 if (recovery_mode && 1125 if (recovery_mode &&
1117 card->info.type != QETH_CARD_TYPE_OSN) { 1126 card->info.type != QETH_CARD_TYPE_OSN) {
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1123 } 1132 }
1124 /* this also sets saved unicast addresses */ 1133 /* this also sets saved unicast addresses */
1125 qeth_l2_set_rx_mode(card->dev); 1134 qeth_l2_set_rx_mode(card->dev);
1126 rtnl_lock();
1127 qeth_recover_features(card->dev);
1128 rtnl_unlock();
1129 } 1135 }
1130 /* let user_space know that device is online */ 1136 /* let user_space know that device is online */
1131 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 1137 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e7fa479adf47..5905dc63e256 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
2662 netif_carrier_on(card->dev); 2662 netif_carrier_on(card->dev);
2663 else 2663 else
2664 netif_carrier_off(card->dev); 2664 netif_carrier_off(card->dev);
2665
2666 qeth_enable_hw_features(card->dev);
2665 if (recover_flag == CARD_STATE_RECOVER) { 2667 if (recover_flag == CARD_STATE_RECOVER) {
2666 rtnl_lock(); 2668 rtnl_lock();
2667 if (recovery_mode) 2669 if (recovery_mode)
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
2669 else 2671 else
2670 dev_open(card->dev); 2672 dev_open(card->dev);
2671 qeth_l3_set_rx_mode(card->dev); 2673 qeth_l3_set_rx_mode(card->dev);
2672 qeth_recover_features(card->dev);
2673 rtnl_unlock(); 2674 rtnl_unlock();
2674 } 2675 }
2675 qeth_trace_features(card); 2676 qeth_trace_features(card);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a9831bd37a73..a57f3a7d4748 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
1974 u32 lun_count, nexus; 1974 u32 lun_count, nexus;
1975 u32 i, bus, target; 1975 u32 i, bus, target;
1976 u8 expose_flag, attribs; 1976 u8 expose_flag, attribs;
1977 u8 devtype;
1978 1977
1979 lun_count = aac_get_safw_phys_lun_count(dev); 1978 lun_count = aac_get_safw_phys_lun_count(dev);
1980 1979
@@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
1992 continue; 1991 continue;
1993 1992
1994 if (expose_flag != 0) { 1993 if (expose_flag != 0) {
1995 devtype = AAC_DEVTYPE_RAID_MEMBER; 1994 dev->hba_map[bus][target].devtype =
1996 goto update_devtype; 1995 AAC_DEVTYPE_RAID_MEMBER;
1996 continue;
1997 } 1997 }
1998 1998
1999 if (nexus != 0 && (attribs & 8)) { 1999 if (nexus != 0 && (attribs & 8)) {
2000 devtype = AAC_DEVTYPE_NATIVE_RAW; 2000 dev->hba_map[bus][target].devtype =
2001 AAC_DEVTYPE_NATIVE_RAW;
2001 dev->hba_map[bus][target].rmw_nexus = 2002 dev->hba_map[bus][target].rmw_nexus =
2002 nexus; 2003 nexus;
2003 } else 2004 } else
2004 devtype = AAC_DEVTYPE_ARC_RAW; 2005 dev->hba_map[bus][target].devtype =
2006 AAC_DEVTYPE_ARC_RAW;
2005 2007
2006 dev->hba_map[bus][target].scan_counter = dev->scan_counter; 2008 dev->hba_map[bus][target].scan_counter = dev->scan_counter;
2007 2009
2008 aac_set_safw_target_qd(dev, bus, target); 2010 aac_set_safw_target_qd(dev, bus, target);
2009
2010update_devtype:
2011 dev->hba_map[bus][target].devtype = devtype;
2012 } 2011 }
2013} 2012}
2014 2013
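
A minimal userspace model of the reworked aacraid loop above: targets flagged as RAID members get their devtype set inline and are skipped with continue, while the remaining targets have devtype assigned at the point of decision instead of through a trailing goto label. Names and values here are illustrative, not the driver's.

#include <stdio.h>

enum devtype { ARC_RAW, NATIVE_RAW, RAID_MEMBER };

struct target { enum devtype devtype; unsigned rmw_nexus; unsigned scan_counter; };

/* Model of the reworked loop: RAID members get their devtype set and
 * skip the rest of the per-target setup via continue, instead of
 * jumping over it with a goto that still performed the assignment. */
static void classify(struct target *t, int expose_flag, unsigned nexus,
		     unsigned attribs, unsigned scan_counter)
{
	if (expose_flag) {
		t->devtype = RAID_MEMBER;
		return;			/* stands in for "continue" */
	}
	if (nexus && (attribs & 8)) {
		t->devtype = NATIVE_RAW;
		t->rmw_nexus = nexus;
	} else {
		t->devtype = ARC_RAW;
	}
	t->scan_counter = scan_counter;	/* only non-members reach here */
}

int main(void)
{
	struct target t = { 0 };
	classify(&t, 0, 3, 8, 42);
	printf("devtype=%d nexus=%u scan=%u\n", t.devtype, t.rmw_nexus, t.scan_counter);
	return 0;
}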
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0a9b8b387bd2..02d65dce74e5 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
760 ioa_cfg->hrrq[i].allow_interrupts = 0; 760 ioa_cfg->hrrq[i].allow_interrupts = 0;
761 spin_unlock(&ioa_cfg->hrrq[i]._lock); 761 spin_unlock(&ioa_cfg->hrrq[i]._lock);
762 } 762 }
763 wmb();
764 763
765 /* Set interrupt mask to stop all new interrupts */ 764 /* Set interrupt mask to stop all new interrupts */
766 if (ioa_cfg->sis64) 765 if (ioa_cfg->sis64)
@@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8403 ioa_cfg->hrrq[i].allow_interrupts = 1; 8402 ioa_cfg->hrrq[i].allow_interrupts = 1;
8404 spin_unlock(&ioa_cfg->hrrq[i]._lock); 8403 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8405 } 8404 }
8406 wmb();
8407 if (ioa_cfg->sis64) { 8405 if (ioa_cfg->sis64) {
8408 /* Set the adapter to the correct endian mode. */ 8406 /* Set the adapter to the correct endian mode. */
8409 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg); 8407 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
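
The ipr hunks remove wmb() calls that followed spin-locked updates of allow_interrupts. A hedged userspace sketch of why that is safe, using C11 atomics to stand in for the kernel primitives: the unlock is a release operation, so the flag store cannot drift past it and a separate write barrier adds nothing.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool lock;

/* Each iteration takes the lock, clears the flag, and releases the lock.
 * The release store that models spin_unlock() already orders the flag
 * store before it, so the wmb() that used to follow the loop was
 * redundant; the MMIO write masking interrupts comes after this. */
static void hrrq_disable(atomic_int *allow_interrupts, int n)
{
	for (int i = 0; i < n; i++) {
		while (atomic_exchange_explicit(&lock, true,
						memory_order_acquire))
			;	/* spin_lock() */
		atomic_store_explicit(&allow_interrupts[i], 0,
				      memory_order_relaxed);
		atomic_store_explicit(&lock, false,
				      memory_order_release);	/* spin_unlock() */
	}
}

int main(void)
{
	atomic_int flags[2] = { 1, 1 };
	hrrq_disable(flags, 2);
	return 0;
}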
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0fea2e2326be..1027b0cb7fa3 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
1224void qlt_schedule_sess_for_deletion(struct fc_port *sess) 1224void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1225{ 1225{
1226 struct qla_tgt *tgt = sess->tgt; 1226 struct qla_tgt *tgt = sess->tgt;
1227 struct qla_hw_data *ha = sess->vha->hw;
1228 unsigned long flags; 1227 unsigned long flags;
1229 1228
1230 if (sess->disc_state == DSC_DELETE_PEND) 1229 if (sess->disc_state == DSC_DELETE_PEND)
@@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1241 return; 1240 return;
1242 } 1241 }
1243 1242
1244 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1245 if (sess->deleted == QLA_SESS_DELETED) 1243 if (sess->deleted == QLA_SESS_DELETED)
1246 sess->logout_on_delete = 0; 1244 sess->logout_on_delete = 0;
1247 1245
1246 spin_lock_irqsave(&sess->vha->work_lock, flags);
1248 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 1247 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1249 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1248 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1250 return; 1249 return;
1251 } 1250 }
1252 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; 1251 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
1253 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1252 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1254 1253
1255 sess->disc_state = DSC_DELETE_PEND; 1254 sess->disc_state = DSC_DELETE_PEND;
1256 1255
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 24d7496cd9e2..364e71861bfd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void)
5507 int k = sdebug_add_host; 5507 int k = sdebug_add_host;
5508 5508
5509 stop_all_queued(); 5509 stop_all_queued();
5510 free_all_queued();
5511 for (; k; k--) 5510 for (; k; k--)
5512 sdebug_remove_adapter(); 5511 sdebug_remove_adapter();
5512 free_all_queued();
5513 driver_unregister(&sdebug_driverfs_driver); 5513 driver_unregister(&sdebug_driverfs_driver);
5514 bus_unregister(&pseudo_lld_bus); 5514 bus_unregister(&pseudo_lld_bus);
5515 root_device_unregister(pseudo_primary); 5515 root_device_unregister(pseudo_primary);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1da3d71e9f61..13948102ca29 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3592,7 +3592,7 @@ fc_bsg_job_timeout(struct request *req)
3592 3592
3593 /* the blk_end_sync_io() doesn't check the error */ 3593 /* the blk_end_sync_io() doesn't check the error */
3594 if (inflight) 3594 if (inflight)
3595 blk_mq_complete_request(req); 3595 __blk_complete_request(req);
3596 return BLK_EH_DONE; 3596 return BLK_EH_DONE;
3597} 3597}
3598 3598
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 53ae52dbff84..cd2fdac000c9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
51#include <linux/atomic.h> 51#include <linux/atomic.h>
52#include <linux/ratelimit.h> 52#include <linux/ratelimit.h>
53#include <linux/uio.h> 53#include <linux/uio.h>
54#include <linux/cred.h> /* for sg_check_file_access() */
54 55
55#include "scsi.h" 56#include "scsi.h"
56#include <scsi/scsi_dbg.h> 57#include <scsi/scsi_dbg.h>
@@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref);
209 sdev_prefix_printk(prefix, (sdp)->device, \ 210 sdev_prefix_printk(prefix, (sdp)->device, \
210 (sdp)->disk->disk_name, fmt, ##a) 211 (sdp)->disk->disk_name, fmt, ##a)
211 212
213/*
214 * The SCSI interfaces that use read() and write() as an asynchronous variant of
215 * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
216 * to trigger read() and write() calls from various contexts with elevated
217 * privileges. This can lead to kernel memory corruption (e.g. if these
218 * interfaces are called through splice()) and privilege escalation inside
219 * userspace (e.g. if a process with access to such a device passes a file
220 * descriptor to a SUID binary as stdin/stdout/stderr).
221 *
222 * This function provides protection for the legacy API by restricting the
223 * calling context.
224 */
225static int sg_check_file_access(struct file *filp, const char *caller)
226{
227 if (filp->f_cred != current_real_cred()) {
228 pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
229 caller, task_tgid_vnr(current), current->comm);
230 return -EPERM;
231 }
232 if (uaccess_kernel()) {
233 pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
234 caller, task_tgid_vnr(current), current->comm);
235 return -EACCES;
236 }
237 return 0;
238}
239
212static int sg_allow_access(struct file *filp, unsigned char *cmd) 240static int sg_allow_access(struct file *filp, unsigned char *cmd)
213{ 241{
214 struct sg_fd *sfp = filp->private_data; 242 struct sg_fd *sfp = filp->private_data;
@@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
393 struct sg_header *old_hdr = NULL; 421 struct sg_header *old_hdr = NULL;
394 int retval = 0; 422 int retval = 0;
395 423
424 /*
425 * This could cause a response to be stranded. Close the associated
426 * file descriptor to free up any resources being held.
427 */
428 retval = sg_check_file_access(filp, __func__);
429 if (retval)
430 return retval;
431
396 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 432 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
397 return -ENXIO; 433 return -ENXIO;
398 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, 434 SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
580 struct sg_header old_hdr; 616 struct sg_header old_hdr;
581 sg_io_hdr_t *hp; 617 sg_io_hdr_t *hp;
582 unsigned char cmnd[SG_MAX_CDB_SIZE]; 618 unsigned char cmnd[SG_MAX_CDB_SIZE];
619 int retval;
583 620
584 if (unlikely(uaccess_kernel())) 621 retval = sg_check_file_access(filp, __func__);
585 return -EINVAL; 622 if (retval)
623 return retval;
586 624
587 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 625 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
588 return -ENXIO; 626 return -ENXIO;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 36f59a1be7e9..61389bdc7926 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
654static int scsifront_sdev_configure(struct scsi_device *sdev) 654static int scsifront_sdev_configure(struct scsi_device *sdev)
655{ 655{
656 struct vscsifrnt_info *info = shost_priv(sdev->host); 656 struct vscsifrnt_info *info = shost_priv(sdev->host);
657 int err;
657 658
658 if (info && current == info->curr) 659 if (info && current == info->curr) {
659 xenbus_printf(XBT_NIL, info->dev->nodename, 660 err = xenbus_printf(XBT_NIL, info->dev->nodename,
660 info->dev_state_path, "%d", XenbusStateConnected); 661 info->dev_state_path, "%d", XenbusStateConnected);
662 if (err) {
663 xenbus_dev_error(info->dev, err,
664 "%s: writing dev_state_path", __func__);
665 return err;
666 }
667 }
661 668
662 return 0; 669 return 0;
663} 670}
@@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
665static void scsifront_sdev_destroy(struct scsi_device *sdev) 672static void scsifront_sdev_destroy(struct scsi_device *sdev)
666{ 673{
667 struct vscsifrnt_info *info = shost_priv(sdev->host); 674 struct vscsifrnt_info *info = shost_priv(sdev->host);
675 int err;
668 676
669 if (info && current == info->curr) 677 if (info && current == info->curr) {
670 xenbus_printf(XBT_NIL, info->dev->nodename, 678 err = xenbus_printf(XBT_NIL, info->dev->nodename,
671 info->dev_state_path, "%d", XenbusStateClosed); 679 info->dev_state_path, "%d", XenbusStateClosed);
680 if (err)
681 xenbus_dev_error(info->dev, err,
682 "%s: writing dev_state_path", __func__);
683 }
672} 684}
673 685
674static struct scsi_host_template scsifront_sht = { 686static struct scsi_host_template scsifront_sht = {
@@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
1003 1015
1004 if (scsi_add_device(info->host, chn, tgt, lun)) { 1016 if (scsi_add_device(info->host, chn, tgt, lun)) {
1005 dev_err(&dev->dev, "scsi_add_device\n"); 1017 dev_err(&dev->dev, "scsi_add_device\n");
1006 xenbus_printf(XBT_NIL, dev->nodename, 1018 err = xenbus_printf(XBT_NIL, dev->nodename,
1007 info->dev_state_path, 1019 info->dev_state_path,
1008 "%d", XenbusStateClosed); 1020 "%d", XenbusStateClosed);
1021 if (err)
1022 xenbus_dev_error(dev, err,
1023 "%s: writing dev_state_path", __func__);
1009 } 1024 }
1010 break; 1025 break;
1011 case VSCSIFRONT_OP_DEL_LUN: 1026 case VSCSIFRONT_OP_DEL_LUN:
@@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
1019 } 1034 }
1020 break; 1035 break;
1021 case VSCSIFRONT_OP_READD_LUN: 1036 case VSCSIFRONT_OP_READD_LUN:
1022 if (device_state == XenbusStateConnected) 1037 if (device_state == XenbusStateConnected) {
1023 xenbus_printf(XBT_NIL, dev->nodename, 1038 err = xenbus_printf(XBT_NIL, dev->nodename,
1024 info->dev_state_path, 1039 info->dev_state_path,
1025 "%d", XenbusStateConnected); 1040 "%d", XenbusStateConnected);
1041 if (err)
1042 xenbus_dev_error(dev, err,
1043 "%s: writing dev_state_path", __func__);
1044 }
1026 break; 1045 break;
1027 default: 1046 default:
1028 break; 1047 break;
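
A userspace model of the xen-scsifront pattern above: the write to the backend state node can fail, so the result is checked and reported rather than silently discarded. backend_write() stands in for xenbus_printf(), the fprintf for xenbus_dev_error().

#include <stdio.h>

static int backend_write(const char *path, int state)
{
	if (!path)
		return -5;		/* models -EIO from the bus */
	printf("wrote state %d to %s\n", state, path);
	return 0;
}

static void set_dev_state(const char *path, int state)
{
	int err = backend_write(path, state);
	if (err)
		fprintf(stderr, "writing %s failed: %d\n",
			path ? path : "(null)", err);
}

int main(void)
{
	set_dev_state("device/vscsi/0/state", 4);	/* Connected */
	set_dev_state(NULL, 5);				/* forces the error */
	return 0;
}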
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index f4e3bd40c72e..6ef18cf8f243 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -39,10 +39,15 @@
39 39
40#define GPC_M4_PU_PDN_FLG 0x1bc 40#define GPC_M4_PU_PDN_FLG 0x1bc
41 41
42 42/*
43#define PGC_MIPI 4 43 * The PGC offset values in the Reference Manual
44#define PGC_PCIE 5 44 * (Rev. 1, 01/2018 and older) GPC chapter's
45#define PGC_USB_HSIC 8 45 * GPC_PGC memory map are incorrect; the offset
46 * values below are taken from the design RTL.
47 */
48#define PGC_MIPI 16
49#define PGC_PCIE 17
50#define PGC_USB_HSIC 20
46#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40) 51#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
47#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc) 52#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
48 53
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 9dc02f390ba3..5856e792d09c 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -5,7 +5,8 @@ menu "Qualcomm SoC drivers"
5 5
6config QCOM_COMMAND_DB 6config QCOM_COMMAND_DB
7 bool "Qualcomm Command DB" 7 bool "Qualcomm Command DB"
8 depends on (ARCH_QCOM && OF) || COMPILE_TEST 8 depends on ARCH_QCOM || COMPILE_TEST
9 depends on OF_RESERVED_MEM
9 help 10 help
10 Command DB queries shared memory by key string for shared system 11 Command DB queries shared memory by key string for shared system
11 resources. Platform drivers that require to set state of a shared 12 resources. Platform drivers that require to set state of a shared
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 95120acc4d80..50d03d8b4f9a 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -194,11 +194,12 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
194 194
195static bool has_cpg_mstp; 195static bool has_cpg_mstp;
196 196
197static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd) 197static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
198{ 198{
199 struct generic_pm_domain *genpd = &pd->genpd; 199 struct generic_pm_domain *genpd = &pd->genpd;
200 const char *name = pd->genpd.name; 200 const char *name = pd->genpd.name;
201 struct dev_power_governor *gov = &simple_qos_governor; 201 struct dev_power_governor *gov = &simple_qos_governor;
202 int error;
202 203
203 if (pd->flags & PD_CPU) { 204 if (pd->flags & PD_CPU) {
204 /* 205 /*
@@ -251,7 +252,11 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
251 rcar_sysc_power_up(&pd->ch); 252 rcar_sysc_power_up(&pd->ch);
252 253
253finalize: 254finalize:
254 pm_genpd_init(genpd, gov, false); 255 error = pm_genpd_init(genpd, gov, false);
256 if (error)
257 pr_err("Failed to init PM domain %s: %d\n", name, error);
258
259 return error;
255} 260}
256 261
257static const struct of_device_id rcar_sysc_matches[] __initconst = { 262static const struct of_device_id rcar_sysc_matches[] __initconst = {
@@ -375,6 +380,9 @@ static int __init rcar_sysc_pd_init(void)
375 pr_debug("%pOF: syscier = 0x%08x\n", np, syscier); 380 pr_debug("%pOF: syscier = 0x%08x\n", np, syscier);
376 iowrite32(syscier, base + SYSCIER); 381 iowrite32(syscier, base + SYSCIER);
377 382
383 /*
384 * First, create all PM domains
385 */
378 for (i = 0; i < info->num_areas; i++) { 386 for (i = 0; i < info->num_areas; i++) {
379 const struct rcar_sysc_area *area = &info->areas[i]; 387 const struct rcar_sysc_area *area = &info->areas[i];
380 struct rcar_sysc_pd *pd; 388 struct rcar_sysc_pd *pd;
@@ -397,14 +405,29 @@ static int __init rcar_sysc_pd_init(void)
397 pd->ch.isr_bit = area->isr_bit; 405 pd->ch.isr_bit = area->isr_bit;
398 pd->flags = area->flags; 406 pd->flags = area->flags;
399 407
400 rcar_sysc_pd_setup(pd); 408 error = rcar_sysc_pd_setup(pd);
401 if (area->parent >= 0) 409 if (error)
402 pm_genpd_add_subdomain(domains->domains[area->parent], 410 goto out_put;
403 &pd->genpd);
404 411
405 domains->domains[area->isr_bit] = &pd->genpd; 412 domains->domains[area->isr_bit] = &pd->genpd;
406 } 413 }
407 414
415 /*
416 * Second, link all PM domains to their parents
417 */
418 for (i = 0; i < info->num_areas; i++) {
419 const struct rcar_sysc_area *area = &info->areas[i];
420
421 if (!area->name || area->parent < 0)
422 continue;
423
424 error = pm_genpd_add_subdomain(domains->domains[area->parent],
425 domains->domains[area->isr_bit]);
426 if (error)
427 pr_warn("Failed to add PM subdomain %s to parent %u\n",
428 area->name, area->parent);
429 }
430
408 error = of_genpd_add_provider_onecell(np, &domains->onecell_data); 431 error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
409 432
410out_put: 433out_put:
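
A userspace model of the rcar-sysc two-pass setup: creating every domain before linking any subdomain means a child that appears earlier in the table than its parent no longer touches a not-yet-initialized entry. The table contents are illustrative.

#include <stdio.h>

struct area { const char *name; int parent; };	/* parent: index or -1 */

int main(void)
{
	struct area areas[] = {
		{ "a3vp", 1 }, { "always-on", -1 }, { "a2vc", 0 },
	};
	int created[3] = { 0 };

	for (int i = 0; i < 3; i++)		/* pass 1: create */
		created[i] = 1;
	for (int i = 0; i < 3; i++) {		/* pass 2: link */
		if (areas[i].parent < 0)
			continue;
		if (!created[areas[i].parent])
			printf("would have failed under single-pass init\n");
		else
			printf("%s -> parent %s\n", areas[i].name,
			       areas[areas[i].parent].name);
	}
	return 0;
}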
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index e8c440329708..31db510018a9 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -30,7 +30,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
30 struct page **tmp = pages; 30 struct page **tmp = pages;
31 31
32 if (!pages) 32 if (!pages)
33 return NULL; 33 return ERR_PTR(-ENOMEM);
34 34
35 if (buffer->flags & ION_FLAG_CACHED) 35 if (buffer->flags & ION_FLAG_CACHED)
36 pgprot = PAGE_KERNEL; 36 pgprot = PAGE_KERNEL;
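
A userspace model of the ERR_PTR convention the ion fix switches to: encoding the errno in the pointer lets callers distinguish why the mapping failed instead of seeing a bare NULL. The helpers below mirror the kernel's ERR_PTR/IS_ERR/PTR_ERR.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *map_kernel(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);	/* was: return NULL */
	return "mapping";
}

int main(void)
{
	void *p = map_kernel(1);
	if (IS_ERR(p))
		printf("map failed: %ld\n", PTR_ERR(p));
	return 0;
}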
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index ea194aa01a64..257b0daff01f 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
642 /* Make sure D/A update mode is direct update */ 642 /* Make sure D/A update mode is direct update */
643 outb(0, dev->iobase + DAQP_AUX_REG); 643 outb(0, dev->iobase + DAQP_AUX_REG);
644 644
645 for (i = 0; i > insn->n; i++) { 645 for (i = 0; i < insn->n; i++) {
646 unsigned int val = data[i]; 646 unsigned int val = data[i];
647 int ret; 647 int ret;
648 648
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 45c05527a57a..faf4b4158cfa 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
1051 return _FAIL; 1051 return _FAIL;
1052 1052
1053 1053
1054 if (len > MAX_IE_SZ) 1054 if (len < 0 || len > MAX_IE_SZ)
1055 return _FAIL; 1055 return _FAIL;
1056 1056
1057 pbss_network->IELength = len; 1057 pbss_network->IELength = len;
diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c
index 7947edb239a1..88ba5b2fea6a 100644
--- a/drivers/staging/rtlwifi/rtl8822be/hw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/hw.c
@@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw)
803 return; 803 return;
804 804
805 pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp); 805 pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
806 pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7)); 806 pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3);
807 807
808 pci_read_config_byte(rtlpci->pdev, 0x719, &tmp); 808 pci_read_config_byte(rtlpci->pdev, 0x719, &tmp);
809 pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4)); 809 pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4));
diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h
index 012fb618840b..a45f0eb69d3f 100644
--- a/drivers/staging/rtlwifi/wifi.h
+++ b/drivers/staging/rtlwifi/wifi.h
@@ -88,6 +88,7 @@
88#define RTL_USB_MAX_RX_COUNT 100 88#define RTL_USB_MAX_RX_COUNT 100
89#define QBSS_LOAD_SIZE 5 89#define QBSS_LOAD_SIZE 5
90#define MAX_WMMELE_LENGTH 64 90#define MAX_WMMELE_LENGTH 64
91#define ASPM_L1_LATENCY 7
91 92
92#define TOTAL_CAM_ENTRY 32 93#define TOTAL_CAM_ENTRY 32
93 94
diff --git a/drivers/staging/typec/Kconfig b/drivers/staging/typec/Kconfig
index 3aa981fbc8f5..e45ed08a5166 100644
--- a/drivers/staging/typec/Kconfig
+++ b/drivers/staging/typec/Kconfig
@@ -11,6 +11,7 @@ config TYPEC_TCPCI
11 11
12config TYPEC_RT1711H 12config TYPEC_RT1711H
13 tristate "Richtek RT1711H Type-C chip driver" 13 tristate "Richtek RT1711H Type-C chip driver"
14 depends on I2C
14 select TYPEC_TCPCI 15 select TYPEC_TCPCI
15 help 16 help
16 Richtek RT1711H Type-C chip driver that works with 17 Richtek RT1711H Type-C chip driver that works with
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 01ac306131c1..10db5656fd5d 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
3727 * Check for overflow of 8byte PRI READ_KEYS payload and 3727 * Check for overflow of 8byte PRI READ_KEYS payload and
3728 * next reservation key list descriptor. 3728 * next reservation key list descriptor.
3729 */ 3729 */
3730 if ((add_len + 8) > (cmd->data_length - 8)) 3730 if (off + 8 <= cmd->data_length) {
3731 break; 3731 put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
3732 3732 off += 8;
3733 put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); 3733 }
3734 off += 8; 3734 /*
3735 * SPC5r17: 6.16.2 READ KEYS service action
3736 * The ADDITIONAL LENGTH field indicates the number of bytes in
3737 * the Reservation key list. The contents of the ADDITIONAL
3738 * LENGTH field are not altered based on the allocation length
3739 */
3735 add_len += 8; 3740 add_len += 8;
3736 } 3741 }
3737 spin_unlock(&dev->t10_pr.registration_lock); 3742 spin_unlock(&dev->t10_pr.registration_lock);
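
A userspace model of the READ KEYS fix: keys that do not fit in the allocation length are dropped from the payload, but ADDITIONAL LENGTH still counts them, as SPC requires, so the initiator can reissue the command with a larger buffer. Sizes are illustrative.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint64_t keys[] = { 0x1111, 0x2222, 0x3333, 0x4444 };
	uint8_t buf[8 + 16];			/* room for header + 2 keys */
	uint32_t data_length = sizeof(buf), off = 8, add_len = 0;

	for (size_t i = 0; i < 4; i++) {
		if (off + 8 <= data_length) {	/* copy only what fits */
			memcpy(&buf[off], &keys[i], 8);
			off += 8;
		}
		add_len += 8;			/* but always count it */
	}
	printf("returned %u bytes of keys, ADDITIONAL LENGTH = %u\n",
	       off - 8, add_len);
	return 0;
}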
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 7f96dfa32b9c..d8dc3d22051f 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
656} 656}
657 657
658static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 658static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
659 bool bidi) 659 bool bidi, uint32_t read_len)
660{ 660{
661 struct se_cmd *se_cmd = cmd->se_cmd; 661 struct se_cmd *se_cmd = cmd->se_cmd;
662 int i, dbi; 662 int i, dbi;
@@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
689 for_each_sg(data_sg, sg, data_nents, i) { 689 for_each_sg(data_sg, sg, data_nents, i) {
690 int sg_remaining = sg->length; 690 int sg_remaining = sg->length;
691 to = kmap_atomic(sg_page(sg)) + sg->offset; 691 to = kmap_atomic(sg_page(sg)) + sg->offset;
692 while (sg_remaining > 0) { 692 while (sg_remaining > 0 && read_len > 0) {
693 if (block_remaining == 0) { 693 if (block_remaining == 0) {
694 if (from) 694 if (from)
695 kunmap_atomic(from); 695 kunmap_atomic(from);
@@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
701 } 701 }
702 copy_bytes = min_t(size_t, sg_remaining, 702 copy_bytes = min_t(size_t, sg_remaining,
703 block_remaining); 703 block_remaining);
704 if (read_len < copy_bytes)
705 copy_bytes = read_len;
704 offset = DATA_BLOCK_SIZE - block_remaining; 706 offset = DATA_BLOCK_SIZE - block_remaining;
705 tcmu_flush_dcache_range(from, copy_bytes); 707 tcmu_flush_dcache_range(from, copy_bytes);
706 memcpy(to + sg->length - sg_remaining, from + offset, 708 memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
708 710
709 sg_remaining -= copy_bytes; 711 sg_remaining -= copy_bytes;
710 block_remaining -= copy_bytes; 712 block_remaining -= copy_bytes;
713 read_len -= copy_bytes;
711 } 714 }
712 kunmap_atomic(to - sg->offset); 715 kunmap_atomic(to - sg->offset);
716 if (read_len == 0)
717 break;
713 } 718 }
714 if (from) 719 if (from)
715 kunmap_atomic(from); 720 kunmap_atomic(from);
@@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1042{ 1047{
1043 struct se_cmd *se_cmd = cmd->se_cmd; 1048 struct se_cmd *se_cmd = cmd->se_cmd;
1044 struct tcmu_dev *udev = cmd->tcmu_dev; 1049 struct tcmu_dev *udev = cmd->tcmu_dev;
1050 bool read_len_valid = false;
1051 uint32_t read_len = se_cmd->data_length;
1045 1052
1046 /* 1053 /*
1047 * cmd has been completed already from timeout, just reclaim 1054 * cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1056 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 1063 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1057 cmd->se_cmd); 1064 cmd->se_cmd);
1058 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 1065 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1059 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 1066 goto done;
1067 }
1068
1069 if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1070 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1071 read_len_valid = true;
1072 if (entry->rsp.read_len < read_len)
1073 read_len = entry->rsp.read_len;
1074 }
1075
1076 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1060 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); 1077 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
1061 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 1078 if (!read_len_valid)
1079 goto done;
1080 else
1081 se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
1082 }
1083 if (se_cmd->se_cmd_flags & SCF_BIDI) {
1062 /* Get Data-In buffer before clean up */ 1084 /* Get Data-In buffer before clean up */
1063 gather_data_area(udev, cmd, true); 1085 gather_data_area(udev, cmd, true, read_len);
1064 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 1086 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1065 gather_data_area(udev, cmd, false); 1087 gather_data_area(udev, cmd, false, read_len);
1066 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 1088 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1067 /* TODO: */ 1089 /* TODO: */
1068 } else if (se_cmd->data_direction != DMA_NONE) { 1090 } else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1092,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1070 se_cmd->data_direction); 1092 se_cmd->data_direction);
1071 } 1093 }
1072 1094
1073 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); 1095done:
1096 if (read_len_valid) {
1097 pr_debug("read_len = %d\n", read_len);
1098 target_complete_cmd_with_length(cmd->se_cmd,
1099 entry->rsp.scsi_status, read_len);
1100 } else
1101 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1074 1102
1075out: 1103out:
1076 cmd->se_cmd = NULL; 1104 cmd->se_cmd = NULL;
@@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev)
1740 /* Initialise the mailbox of the ring buffer */ 1768 /* Initialise the mailbox of the ring buffer */
1741 mb = udev->mb_addr; 1769 mb = udev->mb_addr;
1742 mb->version = TCMU_MAILBOX_VERSION; 1770 mb->version = TCMU_MAILBOX_VERSION;
1743 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC; 1771 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
1744 mb->cmdr_off = CMDR_OFF; 1772 mb->cmdr_off = CMDR_OFF;
1745 mb->cmdr_size = udev->cmdr_size; 1773 mb->cmdr_size = udev->cmdr_size;
1746 1774
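
A userspace model of the tcmu read_len handling: when userspace reports fewer valid bytes than the command's data length, the copy-back loop is capped at read_len instead of draining every scatter-gather segment. The fixed-size chunks stand in for sg entries.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	char data[64], dst[64];
	memset(data, 'x', sizeof(data));
	uint32_t data_length = 64;		/* se_cmd->data_length */
	uint32_t rsp_read_len = 20;		/* entry->rsp.read_len */
	uint32_t read_len = data_length;

	if (rsp_read_len && rsp_read_len < read_len)
		read_len = rsp_read_len;	/* cap to what is valid */

	uint32_t copied = 0, chunk = 16;	/* per-sg-segment size */
	while (copied < sizeof(dst) && read_len > 0) {
		uint32_t n = chunk < read_len ? chunk : read_len;
		memcpy(dst + copied, data + copied, n);
		copied += n;
		read_len -= n;
	}
	printf("copied %u of %u bytes\n", copied, data_length);
	return 0;
}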
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 6281266b8ec0..a923ebdeb73c 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -213,6 +213,10 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
213 goto err_free_acl; 213 goto err_free_acl;
214 } 214 }
215 ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); 215 ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
216 if (!ret) {
217 /* Notify userspace about the change */
218 kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
219 }
216 mutex_unlock(&tb->lock); 220 mutex_unlock(&tb->lock);
217 221
218err_free_acl: 222err_free_acl:
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cbe98bc2b998..431742201709 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -124,6 +124,8 @@ struct n_tty_data {
124 struct mutex output_lock; 124 struct mutex output_lock;
125}; 125};
126 126
127#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
128
127static inline size_t read_cnt(struct n_tty_data *ldata) 129static inline size_t read_cnt(struct n_tty_data *ldata)
128{ 130{
129 return ldata->read_head - ldata->read_tail; 131 return ldata->read_head - ldata->read_tail;
@@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
141 143
142static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i) 144static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
143{ 145{
146 smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
144 return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; 147 return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
145} 148}
146 149
@@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
316static void reset_buffer_flags(struct n_tty_data *ldata) 319static void reset_buffer_flags(struct n_tty_data *ldata)
317{ 320{
318 ldata->read_head = ldata->canon_head = ldata->read_tail = 0; 321 ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
319 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
320 ldata->commit_head = 0; 322 ldata->commit_head = 0;
321 ldata->echo_mark = 0;
322 ldata->line_start = 0; 323 ldata->line_start = 0;
323 324
324 ldata->erasing = 0; 325 ldata->erasing = 0;
@@ -617,13 +618,20 @@ static size_t __process_echoes(struct tty_struct *tty)
617 old_space = space = tty_write_room(tty); 618 old_space = space = tty_write_room(tty);
618 619
619 tail = ldata->echo_tail; 620 tail = ldata->echo_tail;
620 while (ldata->echo_commit != tail) { 621 while (MASK(ldata->echo_commit) != MASK(tail)) {
621 c = echo_buf(ldata, tail); 622 c = echo_buf(ldata, tail);
622 if (c == ECHO_OP_START) { 623 if (c == ECHO_OP_START) {
623 unsigned char op; 624 unsigned char op;
624 int no_space_left = 0; 625 int no_space_left = 0;
625 626
626 /* 627 /*
628 * Since add_echo_byte() is called without holding
629 * output_lock, we might see only a portion of a multi-byte
630 * operation.
631 */
632 if (MASK(ldata->echo_commit) == MASK(tail + 1))
633 goto not_yet_stored;
634 /*
627 * If the buffer byte is the start of a multi-byte 635 * If the buffer byte is the start of a multi-byte
628 * operation, get the next byte, which is either the 636 * operation, get the next byte, which is either the
629 * op code or a control character value. 637 * op code or a control character value.
@@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty)
634 unsigned int num_chars, num_bs; 642 unsigned int num_chars, num_bs;
635 643
636 case ECHO_OP_ERASE_TAB: 644 case ECHO_OP_ERASE_TAB:
645 if (MASK(ldata->echo_commit) == MASK(tail + 2))
646 goto not_yet_stored;
637 num_chars = echo_buf(ldata, tail + 2); 647 num_chars = echo_buf(ldata, tail + 2);
638 648
639 /* 649 /*
@@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty)
728 /* If the echo buffer is nearly full (so that the possibility exists 738 /* If the echo buffer is nearly full (so that the possibility exists
729 * of echo overrun before the next commit), then discard enough 739 * of echo overrun before the next commit), then discard enough
730 * data at the tail to prevent a subsequent overrun */ 740 * data at the tail to prevent a subsequent overrun */
731 while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { 741 while (ldata->echo_commit > tail &&
742 ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
732 if (echo_buf(ldata, tail) == ECHO_OP_START) { 743 if (echo_buf(ldata, tail) == ECHO_OP_START) {
733 if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) 744 if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
734 tail += 3; 745 tail += 3;
@@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty)
738 tail++; 749 tail++;
739 } 750 }
740 751
752 not_yet_stored:
741 ldata->echo_tail = tail; 753 ldata->echo_tail = tail;
742 return old_space - space; 754 return old_space - space;
743} 755}
@@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty)
748 size_t nr, old, echoed; 760 size_t nr, old, echoed;
749 size_t head; 761 size_t head;
750 762
763 mutex_lock(&ldata->output_lock);
751 head = ldata->echo_head; 764 head = ldata->echo_head;
752 ldata->echo_mark = head; 765 ldata->echo_mark = head;
753 old = ldata->echo_commit - ldata->echo_tail; 766 old = ldata->echo_commit - ldata->echo_tail;
@@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty)
756 * is over the threshold (and try again each time another 769 * is over the threshold (and try again each time another
757 * block is accumulated) */ 770 * block is accumulated) */
758 nr = head - ldata->echo_tail; 771 nr = head - ldata->echo_tail;
759 if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK)) 772 if (nr < ECHO_COMMIT_WATERMARK ||
773 (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
774 mutex_unlock(&ldata->output_lock);
760 return; 775 return;
776 }
761 777
762 mutex_lock(&ldata->output_lock);
763 ldata->echo_commit = head; 778 ldata->echo_commit = head;
764 echoed = __process_echoes(tty); 779 echoed = __process_echoes(tty);
765 mutex_unlock(&ldata->output_lock); 780 mutex_unlock(&ldata->output_lock);
@@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty)
810 825
811static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata) 826static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
812{ 827{
813 *echo_buf_addr(ldata, ldata->echo_head++) = c; 828 *echo_buf_addr(ldata, ldata->echo_head) = c;
829 smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
830 ldata->echo_head++;
814} 831}
815 832
816/** 833/**
@@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
978 } 995 }
979 996
980 seen_alnums = 0; 997 seen_alnums = 0;
981 while (ldata->read_head != ldata->canon_head) { 998 while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
982 head = ldata->read_head; 999 head = ldata->read_head;
983 1000
984 /* erase a single possibly multibyte character */ 1001 /* erase a single possibly multibyte character */
985 do { 1002 do {
986 head--; 1003 head--;
987 c = read_buf(ldata, head); 1004 c = read_buf(ldata, head);
988 } while (is_continuation(c, tty) && head != ldata->canon_head); 1005 } while (is_continuation(c, tty) &&
1006 MASK(head) != MASK(ldata->canon_head));
989 1007
990 /* do not partially erase */ 1008 /* do not partially erase */
991 if (is_continuation(c, tty)) 1009 if (is_continuation(c, tty))
@@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1027 * This info is used to go back the correct 1045 * This info is used to go back the correct
1028 * number of columns. 1046 * number of columns.
1029 */ 1047 */
1030 while (tail != ldata->canon_head) { 1048 while (MASK(tail) != MASK(ldata->canon_head)) {
1031 tail--; 1049 tail--;
1032 c = read_buf(ldata, tail); 1050 c = read_buf(ldata, tail);
1033 if (c == '\t') { 1051 if (c == '\t') {
@@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
1302 finish_erasing(ldata); 1320 finish_erasing(ldata);
1303 echo_char(c, tty); 1321 echo_char(c, tty);
1304 echo_char_raw('\n', ldata); 1322 echo_char_raw('\n', ldata);
1305 while (tail != ldata->read_head) { 1323 while (MASK(tail) != MASK(ldata->read_head)) {
1306 echo_char(read_buf(ldata, tail), tty); 1324 echo_char(read_buf(ldata, tail), tty);
1307 tail++; 1325 tail++;
1308 } 1326 }
@@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty)
1878 struct n_tty_data *ldata; 1896 struct n_tty_data *ldata;
1879 1897
1880 /* Currently a malloc failure here can panic */ 1898 /* Currently a malloc failure here can panic */
1881 ldata = vmalloc(sizeof(*ldata)); 1899 ldata = vzalloc(sizeof(*ldata));
1882 if (!ldata) 1900 if (!ldata)
1883 goto err; 1901 return -ENOMEM;
1884 1902
1885 ldata->overrun_time = jiffies; 1903 ldata->overrun_time = jiffies;
1886 mutex_init(&ldata->atomic_read_lock); 1904 mutex_init(&ldata->atomic_read_lock);
1887 mutex_init(&ldata->output_lock); 1905 mutex_init(&ldata->output_lock);
1888 1906
1889 tty->disc_data = ldata; 1907 tty->disc_data = ldata;
1890 reset_buffer_flags(tty->disc_data);
1891 ldata->column = 0;
1892 ldata->canon_column = 0;
1893 ldata->num_overrun = 0;
1894 ldata->no_room = 0;
1895 ldata->lnext = 0;
1896 tty->closing = 0; 1908 tty->closing = 0;
1897 /* indicate buffer work may resume */ 1909 /* indicate buffer work may resume */
1898 clear_bit(TTY_LDISC_HALTED, &tty->flags); 1910 clear_bit(TTY_LDISC_HALTED, &tty->flags);
1899 n_tty_set_termios(tty, NULL); 1911 n_tty_set_termios(tty, NULL);
1900 tty_unthrottle(tty); 1912 tty_unthrottle(tty);
1901
1902 return 0; 1913 return 0;
1903err:
1904 return -ENOMEM;
1905} 1914}
1906 1915
1907static inline int input_available_p(struct tty_struct *tty, int poll) 1916static inline int input_available_p(struct tty_struct *tty, int poll)
@@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
2411 tail = ldata->read_tail; 2420 tail = ldata->read_tail;
2412 nr = head - tail; 2421 nr = head - tail;
2413 /* Skip EOF-chars.. */ 2422 /* Skip EOF-chars.. */
2414 while (head != tail) { 2423 while (MASK(head) != MASK(tail)) {
2415 if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) && 2424 if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
2416 read_buf(ldata, tail) == __DISABLED_CHAR) 2425 read_buf(ldata, tail) == __DISABLED_CHAR)
2417 nr--; 2426 nr--;
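
A userspace model of the n_tty echo-buffer ordering: the byte is stored before the head index is published, and the reader pairs an acquire with that release, mirroring the smp_wmb()/smp_rmb() added to add_echo_byte() and echo_buf(); MASK() wraps the free-running indices the same way.

#include <stdatomic.h>
#include <stdio.h>

#define BUF_SIZE 8
#define MASK(x) ((x) & (BUF_SIZE - 1))

static unsigned char buf[BUF_SIZE];
static _Atomic size_t head;

static void add_byte(unsigned char c)	/* producer side */
{
	size_t h = atomic_load_explicit(&head, memory_order_relaxed);
	buf[MASK(h)] = c;				/* store data... */
	atomic_store_explicit(&head, h + 1,
			      memory_order_release);	/* ...then publish */
}

static int read_byte(size_t tail, unsigned char *c)	/* consumer side */
{
	size_t h = atomic_load_explicit(&head, memory_order_acquire);
	if (h == tail)
		return 0;				/* nothing new */
	*c = buf[MASK(tail)];
	return 1;
}

int main(void)
{
	unsigned char c;
	add_byte('a');
	if (read_byte(0, &c))
		printf("%c\n", c);
	return 0;
}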
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index df93b727e984..9e59f4788589 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
617static void __exit serdev_exit(void) 617static void __exit serdev_exit(void)
618{ 618{
619 bus_unregister(&serdev_bus_type); 619 bus_unregister(&serdev_bus_type);
620 ida_destroy(&ctrl_ida);
620} 621}
621module_exit(serdev_exit); 622module_exit(serdev_exit);
622 623
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 3296a05cda2d..f80a300b5d68 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = {
3339 /* multi-io cards handled by parport_serial */ 3339 /* multi-io cards handled by parport_serial */
3340 { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */ 3340 { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
3341 { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */ 3341 { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
3342 { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
3343 { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */ 3342 { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
3344 { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
3345 3343
3346 /* Moxa Smartio MUE boards handled by 8250_moxa */ 3344 /* Moxa Smartio MUE boards handled by 8250_moxa */
3347 { PCI_VDEVICE(MOXA, 0x1024), }, 3345 { PCI_VDEVICE(MOXA, 0x1024), },
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 1eb1a376a041..15eb6c829d39 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
784 if (!*vc->vc_uni_pagedir_loc) 784 if (!*vc->vc_uni_pagedir_loc)
785 con_set_default_unimap(vc); 785 con_set_default_unimap(vc);
786 786
787 vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); 787 vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
788 if (!vc->vc_screenbuf) 788 if (!vc->vc_screenbuf)
789 goto err_free; 789 goto err_free;
790 790
@@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
871 871
872 if (new_screen_size > (4 << 20)) 872 if (new_screen_size > (4 << 20))
873 return -EINVAL; 873 return -EINVAL;
874 newscreen = kmalloc(new_screen_size, GFP_USER); 874 newscreen = kzalloc(new_screen_size, GFP_USER);
875 if (!newscreen) 875 if (!newscreen)
876 return -ENOMEM; 876 return -ENOMEM;
877 877
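
A userspace model of the kmalloc-to-kzalloc change in vt: a buffer that may be read back before every byte has been written must start zeroed, otherwise stale allocator contents can leak to userspace. calloc() stands in for kzalloc().

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	size_t size = 4096;
	char *screenbuf = calloc(1, size);	/* kzalloc analogue */
	if (!screenbuf)
		return 1;
	/* only part of the buffer is initialized by the console code */
	memset(screenbuf, ' ', 80);
	/* the rest is still zero, not stale heap data */
	printf("byte 4095 = %d\n", screenbuf[4095]);
	free(screenbuf);
	return 0;
}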
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index e8f4ac9400ea..5d421d7e8904 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -215,7 +215,20 @@ static ssize_t name_show(struct device *dev,
215 struct device_attribute *attr, char *buf) 215 struct device_attribute *attr, char *buf)
216{ 216{
217 struct uio_device *idev = dev_get_drvdata(dev); 217 struct uio_device *idev = dev_get_drvdata(dev);
218 return sprintf(buf, "%s\n", idev->info->name); 218 int ret;
219
220 mutex_lock(&idev->info_lock);
221 if (!idev->info) {
222 ret = -EINVAL;
223 dev_err(dev, "the device has been unregistered\n");
224 goto out;
225 }
226
227 ret = sprintf(buf, "%s\n", idev->info->name);
228
229out:
230 mutex_unlock(&idev->info_lock);
231 return ret;
219} 232}
220static DEVICE_ATTR_RO(name); 233static DEVICE_ATTR_RO(name);
221 234
@@ -223,7 +236,20 @@ static ssize_t version_show(struct device *dev,
223 struct device_attribute *attr, char *buf) 236 struct device_attribute *attr, char *buf)
224{ 237{
225 struct uio_device *idev = dev_get_drvdata(dev); 238 struct uio_device *idev = dev_get_drvdata(dev);
226 return sprintf(buf, "%s\n", idev->info->version); 239 int ret;
240
241 mutex_lock(&idev->info_lock);
242 if (!idev->info) {
243 ret = -EINVAL;
244 dev_err(dev, "the device has been unregistered\n");
245 goto out;
246 }
247
248 ret = sprintf(buf, "%s\n", idev->info->version);
249
250out:
251 mutex_unlock(&idev->info_lock);
252 return ret;
227} 253}
228static DEVICE_ATTR_RO(version); 254static DEVICE_ATTR_RO(version);
229 255
@@ -415,11 +441,15 @@ EXPORT_SYMBOL_GPL(uio_event_notify);
415static irqreturn_t uio_interrupt(int irq, void *dev_id) 441static irqreturn_t uio_interrupt(int irq, void *dev_id)
416{ 442{
417 struct uio_device *idev = (struct uio_device *)dev_id; 443 struct uio_device *idev = (struct uio_device *)dev_id;
418 irqreturn_t ret = idev->info->handler(irq, idev->info); 444 irqreturn_t ret;
419 445
446 mutex_lock(&idev->info_lock);
447
448 ret = idev->info->handler(irq, idev->info);
420 if (ret == IRQ_HANDLED) 449 if (ret == IRQ_HANDLED)
421 uio_event_notify(idev->info); 450 uio_event_notify(idev->info);
422 451
452 mutex_unlock(&idev->info_lock);
423 return ret; 453 return ret;
424} 454}
425 455
@@ -433,7 +463,6 @@ static int uio_open(struct inode *inode, struct file *filep)
433 struct uio_device *idev; 463 struct uio_device *idev;
434 struct uio_listener *listener; 464 struct uio_listener *listener;
435 int ret = 0; 465 int ret = 0;
436 unsigned long flags;
437 466
438 mutex_lock(&minor_lock); 467 mutex_lock(&minor_lock);
439 idev = idr_find(&uio_idr, iminor(inode)); 468 idev = idr_find(&uio_idr, iminor(inode));
@@ -460,10 +489,16 @@ static int uio_open(struct inode *inode, struct file *filep)
460 listener->event_count = atomic_read(&idev->event); 489 listener->event_count = atomic_read(&idev->event);
461 filep->private_data = listener; 490 filep->private_data = listener;
462 491
463 spin_lock_irqsave(&idev->info_lock, flags); 492 mutex_lock(&idev->info_lock);
493 if (!idev->info) {
494 mutex_unlock(&idev->info_lock);
495 ret = -EINVAL;
496 goto err_alloc_listener;
497 }
498
464 if (idev->info && idev->info->open) 499 if (idev->info && idev->info->open)
465 ret = idev->info->open(idev->info, inode); 500 ret = idev->info->open(idev->info, inode);
466 spin_unlock_irqrestore(&idev->info_lock, flags); 501 mutex_unlock(&idev->info_lock);
467 if (ret) 502 if (ret)
468 goto err_infoopen; 503 goto err_infoopen;
469 504
@@ -495,12 +530,11 @@ static int uio_release(struct inode *inode, struct file *filep)
495 int ret = 0; 530 int ret = 0;
496 struct uio_listener *listener = filep->private_data; 531 struct uio_listener *listener = filep->private_data;
497 struct uio_device *idev = listener->dev; 532 struct uio_device *idev = listener->dev;
498 unsigned long flags;
499 533
500 spin_lock_irqsave(&idev->info_lock, flags); 534 mutex_lock(&idev->info_lock);
501 if (idev->info && idev->info->release) 535 if (idev->info && idev->info->release)
502 ret = idev->info->release(idev->info, inode); 536 ret = idev->info->release(idev->info, inode);
503 spin_unlock_irqrestore(&idev->info_lock, flags); 537 mutex_unlock(&idev->info_lock);
504 538
505 module_put(idev->owner); 539 module_put(idev->owner);
506 kfree(listener); 540 kfree(listener);
@@ -513,12 +547,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait)
513 struct uio_listener *listener = filep->private_data; 547 struct uio_listener *listener = filep->private_data;
514 struct uio_device *idev = listener->dev; 548 struct uio_device *idev = listener->dev;
515 __poll_t ret = 0; 549 __poll_t ret = 0;
516 unsigned long flags;
517 550
518 spin_lock_irqsave(&idev->info_lock, flags); 551 mutex_lock(&idev->info_lock);
519 if (!idev->info || !idev->info->irq) 552 if (!idev->info || !idev->info->irq)
520 ret = -EIO; 553 ret = -EIO;
521 spin_unlock_irqrestore(&idev->info_lock, flags); 554 mutex_unlock(&idev->info_lock);
522 555
523 if (ret) 556 if (ret)
524 return ret; 557 return ret;
@@ -537,12 +570,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
537 DECLARE_WAITQUEUE(wait, current); 570 DECLARE_WAITQUEUE(wait, current);
538 ssize_t retval = 0; 571 ssize_t retval = 0;
539 s32 event_count; 572 s32 event_count;
540 unsigned long flags;
541 573
542 spin_lock_irqsave(&idev->info_lock, flags); 574 mutex_lock(&idev->info_lock);
543 if (!idev->info || !idev->info->irq) 575 if (!idev->info || !idev->info->irq)
544 retval = -EIO; 576 retval = -EIO;
545 spin_unlock_irqrestore(&idev->info_lock, flags); 577 mutex_unlock(&idev->info_lock);
546 578
547 if (retval) 579 if (retval)
548 return retval; 580 return retval;
@@ -592,9 +624,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
592 struct uio_device *idev = listener->dev; 624 struct uio_device *idev = listener->dev;
593 ssize_t retval; 625 ssize_t retval;
594 s32 irq_on; 626 s32 irq_on;
595 unsigned long flags;
596 627
597 spin_lock_irqsave(&idev->info_lock, flags); 628 mutex_lock(&idev->info_lock);
629 if (!idev->info) {
630 retval = -EINVAL;
631 goto out;
632 }
633
598 if (!idev->info || !idev->info->irq) { 634 if (!idev->info->irq) {
599 retval = -EIO; 635 retval = -EIO;
600 goto out; 636 goto out;
@@ -618,7 +654,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
618 retval = idev->info->irqcontrol(idev->info, irq_on); 654 retval = idev->info->irqcontrol(idev->info, irq_on);
619 655
620out: 656out:
621 spin_unlock_irqrestore(&idev->info_lock, flags); 657 mutex_unlock(&idev->info_lock);
622 return retval ? retval : sizeof(s32); 658 return retval ? retval : sizeof(s32);
623} 659}
624 660
@@ -640,10 +676,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
640 struct page *page; 676 struct page *page;
641 unsigned long offset; 677 unsigned long offset;
642 void *addr; 678 void *addr;
679 int ret = 0;
680 int mi;
643 681
644 int mi = uio_find_mem_index(vmf->vma); 682 mutex_lock(&idev->info_lock);
645 if (mi < 0) 683 if (!idev->info) {
646 return VM_FAULT_SIGBUS; 684 ret = VM_FAULT_SIGBUS;
685 goto out;
686 }
687
688 mi = uio_find_mem_index(vmf->vma);
689 if (mi < 0) {
690 ret = VM_FAULT_SIGBUS;
691 goto out;
692 }
647 693
648 /* 694 /*
649 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 695 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
@@ -658,7 +704,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
658 page = vmalloc_to_page(addr); 704 page = vmalloc_to_page(addr);
659 get_page(page); 705 get_page(page);
660 vmf->page = page; 706 vmf->page = page;
661 return 0; 707
708out:
709 mutex_unlock(&idev->info_lock);
710
711 return ret;
662} 712}
663 713
664static const struct vm_operations_struct uio_logical_vm_ops = { 714static const struct vm_operations_struct uio_logical_vm_ops = {
@@ -683,6 +733,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
683 struct uio_device *idev = vma->vm_private_data; 733 struct uio_device *idev = vma->vm_private_data;
684 int mi = uio_find_mem_index(vma); 734 int mi = uio_find_mem_index(vma);
685 struct uio_mem *mem; 735 struct uio_mem *mem;
736
686 if (mi < 0) 737 if (mi < 0)
687 return -EINVAL; 738 return -EINVAL;
688 mem = idev->info->mem + mi; 739 mem = idev->info->mem + mi;
@@ -724,30 +775,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
724 775
725 vma->vm_private_data = idev; 776 vma->vm_private_data = idev;
726 777
778 mutex_lock(&idev->info_lock);
779 if (!idev->info) {
780 ret = -EINVAL;
781 goto out;
782 }
783
727 mi = uio_find_mem_index(vma); 784 mi = uio_find_mem_index(vma);
728 if (mi < 0) 785 if (mi < 0) {
729 return -EINVAL; 786 ret = -EINVAL;
787 goto out;
788 }
730 789
731 requested_pages = vma_pages(vma); 790 requested_pages = vma_pages(vma);
732 actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) 791 actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
733 + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; 792 + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
734 if (requested_pages > actual_pages) 793 if (requested_pages > actual_pages) {
735 return -EINVAL; 794 ret = -EINVAL;
795 goto out;
796 }
736 797
737 if (idev->info->mmap) { 798 if (idev->info->mmap) {
738 ret = idev->info->mmap(idev->info, vma); 799 ret = idev->info->mmap(idev->info, vma);
739 return ret; 800 goto out;
740 } 801 }
741 802
742 switch (idev->info->mem[mi].memtype) { 803 switch (idev->info->mem[mi].memtype) {
743 case UIO_MEM_PHYS: 804 case UIO_MEM_PHYS:
744 return uio_mmap_physical(vma); 805 ret = uio_mmap_physical(vma);
806 break;
745 case UIO_MEM_LOGICAL: 807 case UIO_MEM_LOGICAL:
746 case UIO_MEM_VIRTUAL: 808 case UIO_MEM_VIRTUAL:
747 return uio_mmap_logical(vma); 809 ret = uio_mmap_logical(vma);
810 break;
748 default: 811 default:
749 return -EINVAL; 812 ret = -EINVAL;
750 } 813 }
814
815out:
816 mutex_unlock(&idev->info_lock);
817 return ret;
751} 818}
752 819
753static const struct file_operations uio_fops = { 820static const struct file_operations uio_fops = {
@@ -865,7 +932,7 @@ int __uio_register_device(struct module *owner,
865 932
866 idev->owner = owner; 933 idev->owner = owner;
867 idev->info = info; 934 idev->info = info;
868 spin_lock_init(&idev->info_lock); 935 mutex_init(&idev->info_lock);
869 init_waitqueue_head(&idev->wait); 936 init_waitqueue_head(&idev->wait);
870 atomic_set(&idev->event, 0); 937 atomic_set(&idev->event, 0);
871 938
@@ -902,8 +969,9 @@ int __uio_register_device(struct module *owner,
902 * FDs at the time of unregister and therefore may not be 969 * FDs at the time of unregister and therefore may not be
903 * freed until they are released. 970 * freed until they are released.
904 */ 971 */
905 ret = request_irq(info->irq, uio_interrupt, 972 ret = request_threaded_irq(info->irq, NULL, uio_interrupt,
906 info->irq_flags, info->name, idev); 973 info->irq_flags, info->name, idev);
974
907 if (ret) 975 if (ret)
908 goto err_request_irq; 976 goto err_request_irq;
909 } 977 }
@@ -928,7 +996,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
928void uio_unregister_device(struct uio_info *info) 996void uio_unregister_device(struct uio_info *info)
929{ 997{
930 struct uio_device *idev; 998 struct uio_device *idev;
931 unsigned long flags;
932 999
933 if (!info || !info->uio_dev) 1000 if (!info || !info->uio_dev)
934 return; 1001 return;
@@ -937,14 +1004,14 @@ void uio_unregister_device(struct uio_info *info)
937 1004
938 uio_free_minor(idev); 1005 uio_free_minor(idev);
939 1006
1007 mutex_lock(&idev->info_lock);
940 uio_dev_del_attributes(idev); 1008 uio_dev_del_attributes(idev);
941 1009
942 if (info->irq && info->irq != UIO_IRQ_CUSTOM) 1010 if (info->irq && info->irq != UIO_IRQ_CUSTOM)
943 free_irq(info->irq, idev); 1011 free_irq(info->irq, idev);
944 1012
945 spin_lock_irqsave(&idev->info_lock, flags);
946 idev->info = NULL; 1013 idev->info = NULL;
947 spin_unlock_irqrestore(&idev->info_lock, flags); 1014 mutex_unlock(&idev->info_lock);
948 1015
949 device_unregister(&idev->dev); 1016 device_unregister(&idev->dev);
950 1017
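
A userspace model of the reworked uio locking: info_lock becomes a sleepable lock and every file operation revalidates idev->info under it, because uio_unregister_device() clears the pointer under the same lock. The pthread mutex and struct are stand-ins for the kernel objects.

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

struct dev {
	pthread_mutex_t info_lock;
	const char *info;	/* NULL once unregistered */
};

static int dev_name(struct dev *d, char *buf, size_t len)
{
	int ret;

	pthread_mutex_lock(&d->info_lock);
	if (!d->info)
		ret = -EINVAL;	/* device already unregistered */
	else
		ret = snprintf(buf, len, "%s\n", d->info);
	pthread_mutex_unlock(&d->info_lock);
	return ret;
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, "mydev" };
	char buf[32];
	printf("ret=%d", dev_name(&d, buf, sizeof(buf)));
	d.info = NULL;		/* models uio_unregister_device(); lock elided in this toy */
	printf(" then ret=%d\n", dev_name(&d, buf, sizeof(buf)));
	return 0;
}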
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index af45aa3222b5..4638d9b066be 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -124,8 +124,11 @@ static int host_start(struct ci_hdrc *ci)
124 124
125 hcd->power_budget = ci->platdata->power_budget; 125 hcd->power_budget = ci->platdata->power_budget;
126 hcd->tpl_support = ci->platdata->tpl_support; 126 hcd->tpl_support = ci->platdata->tpl_support;
127 if (ci->phy || ci->usb_phy) 127 if (ci->phy || ci->usb_phy) {
128 hcd->skip_phy_initialization = 1; 128 hcd->skip_phy_initialization = 1;
129 if (ci->usb_phy)
130 hcd->usb_phy = ci->usb_phy;
131 }
129 132
130 ehci = hcd_to_ehci(hcd); 133 ehci = hcd_to_ehci(hcd);
131 ehci->caps = ci->hw_bank.cap; 134 ehci->caps = ci->hw_bank.cap;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7b366a6c0b49..998b32d0167e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = {
1758 { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ 1758 { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
1759 .driver_info = SINGLE_RX_URB, 1759 .driver_info = SINGLE_RX_URB,
1760 }, 1760 },
1761 { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
1762 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1763 },
1761 { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ 1764 { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
1762 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1765 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1763 }, 1766 },
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index c55def2f1320..097057d2eacf 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = {
378 /* Corsair K70 RGB */ 378 /* Corsair K70 RGB */
379 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 379 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
380 380
381 /* Corsair Strafe */
382 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
383 USB_QUIRK_DELAY_CTRL_MSG },
384
381 /* Corsair Strafe RGB */ 385 /* Corsair Strafe RGB */
382 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | 386 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
383 USB_QUIRK_DELAY_CTRL_MSG }, 387 USB_QUIRK_DELAY_CTRL_MSG },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 4a56ac772a3c..71b3b08ad516 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup {
1004 * @frame_list_sz: Frame list size 1004 * @frame_list_sz: Frame list size
1005 * @desc_gen_cache: Kmem cache for generic descriptors 1005 * @desc_gen_cache: Kmem cache for generic descriptors
1006 * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors 1006 * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors
1007 * @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buf
1007 * 1008 *
1008 * These are for peripheral mode: 1009 * These are for peripheral mode:
1009 * 1010 *
@@ -1177,6 +1178,8 @@ struct dwc2_hsotg {
1177 u32 frame_list_sz; 1178 u32 frame_list_sz;
1178 struct kmem_cache *desc_gen_cache; 1179 struct kmem_cache *desc_gen_cache;
1179 struct kmem_cache *desc_hsisoc_cache; 1180 struct kmem_cache *desc_hsisoc_cache;
1181 struct kmem_cache *unaligned_cache;
1182#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
1180 1183
1181#endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */ 1184#endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
1182 1185
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index f0d9ccf1d665..a0f82cca2d9a 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -812,6 +812,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
812 u32 index; 812 u32 index;
813 u32 maxsize = 0; 813 u32 maxsize = 0;
814 u32 mask = 0; 814 u32 mask = 0;
815 u8 pid = 0;
815 816
816 maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask); 817 maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
817 818
@@ -840,7 +841,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
 			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));
 
 	if (hs_ep->dir_in) {
-		desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+		if (len)
+			pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
+		else
+			pid = 1;
+		desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
 				DEV_DMA_ISOC_PID_MASK) |
 				((len % hs_ep->ep.maxpacket) ?
 				DEV_DMA_SHORT : 0) |
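
The change just above derives the isochronous IN descriptor's PID field from the transfer length rather than the endpoint's mc value: the PID encodes how many max-packet chunks the interval carries, and a zero-length request still occupies one packet. A minimal sketch of the calculation, assuming only DIV_ROUND_UP() from <linux/kernel.h> (the helper name is hypothetical):

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */

	/* Sketch: packets-per-interval for a high-bandwidth isoc IN EP. */
	static u8 isoc_pid_for_len(u32 len, u32 maxpacket)
	{
		/* A zero-length packet still consumes one slot, hence 1. */
		return len ? DIV_ROUND_UP(len, maxpacket) : 1;
	}
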
@@ -884,6 +889,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
 	struct dwc2_dma_desc *desc;
 
 	if (list_empty(&hs_ep->queue)) {
+		hs_ep->target_frame = TARGET_FRAME_INITIAL;
 		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
 		return;
 	}
@@ -2755,8 +2761,6 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
 	 */
 	tmp = dwc2_hsotg_read_frameno(hsotg);
 
-	dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0);
-
 	if (using_desc_dma(hsotg)) {
 		if (ep->target_frame == TARGET_FRAME_INITIAL) {
 			/* Start first ISO Out */
@@ -2817,9 +2821,6 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 
 	tmp = dwc2_hsotg_read_frameno(hsotg);
 	if (using_desc_dma(hsotg)) {
-		dwc2_hsotg_complete_request(hsotg, hs_ep,
-					    get_ep_head(hs_ep), 0);
-
 		hs_ep->target_frame = tmp;
 		dwc2_gadget_incr_frame_num(hs_ep);
 		dwc2_gadget_start_isoc_ddma(hs_ep);
@@ -4739,9 +4740,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
 	}
 
 	ret = usb_add_gadget_udc(dev, &hsotg->gadget);
-	if (ret)
+	if (ret) {
+		dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+					   hsotg->ctrl_req);
 		return ret;
-
+	}
 	dwc2_hsotg_dump(hsotg);
 
 	return 0;
@@ -4755,6 +4758,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
 {
 	usb_del_gadget_udc(&hsotg->gadget);
+	dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
 
 	return 0;
 }
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index edaf0b6af4f0..b1104be3429c 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
 	}
 
 	if (hsotg->params.host_dma) {
-		dwc2_writel((u32)chan->xfer_dma,
-			    hsotg->regs + HCDMA(chan->hc_num));
+		dma_addr_t dma_addr;
+
+		if (chan->align_buf) {
+			if (dbg_hc(chan))
+				dev_vdbg(hsotg->dev, "align_buf\n");
+			dma_addr = chan->align_buf;
+		} else {
+			dma_addr = chan->xfer_dma;
+		}
+		dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
+
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
-				 (unsigned long)chan->xfer_dma, chan->hc_num);
+				 (unsigned long)dma_addr, chan->hc_num);
 	}
 
 	/* Start the split */
@@ -2625,6 +2634,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
 	}
 }
 
+static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
+					    struct dwc2_qh *qh,
+					    struct dwc2_host_chan *chan)
+{
+	if (!hsotg->unaligned_cache ||
+	    chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
+		return -ENOMEM;
+
+	if (!qh->dw_align_buf) {
+		qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
+						    GFP_ATOMIC | GFP_DMA);
+		if (!qh->dw_align_buf)
+			return -ENOMEM;
+	}
+
+	qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
+					      DWC2_KMEM_UNALIGNED_BUF_SIZE,
+					      DMA_FROM_DEVICE);
+
+	if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
+		dev_err(hsotg->dev, "can't map align_buf\n");
+		chan->align_buf = 0;
+		return -EINVAL;
+	}
+
+	chan->align_buf = qh->dw_align_buf_dma;
+	return 0;
+}
+
 #define DWC2_USB_DMA_ALIGN 4
 
 struct dma_aligned_buffer {
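
The helper added above follows a bounce-buffer pattern: lazily allocate a DMA-capable buffer from a dedicated slab cache, map it for the device-to-memory transfer, and hand the channel the mapped address. A condensed sketch of the same pattern with hypothetical names, assuming a cache created with SLAB_CACHE_DMA as in dwc2_hcd_init() further down:

	#include <linux/slab.h>
	#include <linux/dma-mapping.h>

	#define BOUNCE_BUF_SIZE	1024	/* mirrors DWC2_KMEM_UNALIGNED_BUF_SIZE */

	/* Sketch: GFP_ATOMIC because the dwc2 caller may run in IRQ context. */
	static int map_bounce_buf(struct device *dev, struct kmem_cache *cache,
				  void **buf, dma_addr_t *dma)
	{
		if (!*buf) {
			*buf = kmem_cache_alloc(cache, GFP_ATOMIC | GFP_DMA);
			if (!*buf)
				return -ENOMEM;
		}

		*dma = dma_map_single(dev, *buf, BOUNCE_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *dma))
			return -EINVAL;

		return 0;
	}
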
@@ -2802,6 +2840,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 	/* Set the transfer attributes */
 	dwc2_hc_init_xfer(hsotg, chan, qtd);
 
+	/* For non-dword aligned buffers */
+	if (hsotg->params.host_dma && qh->do_split &&
+	    chan->ep_is_in && (chan->xfer_dma & 0x3)) {
+		dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+		if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
+			dev_err(hsotg->dev,
+				"Failed to allocate memory to handle non-aligned buffer\n");
+			/* Add channel back to free list */
+			chan->align_buf = 0;
+			chan->multi_count = 0;
+			list_add_tail(&chan->hc_list_entry,
+				      &hsotg->free_hc_list);
+			qtd->in_process = 0;
+			qh->channel = NULL;
+			return -ENOMEM;
+		}
+	} else {
+		/*
+		 * We assume that DMA is always aligned in non-split
+		 * case or split out case. Warn if not.
+		 */
+		WARN_ON_ONCE(hsotg->params.host_dma &&
+			     (chan->xfer_dma & 0x3));
+		chan->align_buf = 0;
+	}
+
 	if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
 	    chan->ep_type == USB_ENDPOINT_XFER_ISOC)
 		/*
@@ -5246,6 +5310,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
 		}
 	}
 
+	if (hsotg->params.host_dma) {
+		/*
+		 * Create kmem caches to handle non-aligned buffer
+		 * in Buffer DMA mode.
+		 */
+		hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
+						DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
+						SLAB_CACHE_DMA, NULL);
+		if (!hsotg->unaligned_cache)
+			dev_err(hsotg->dev,
+				"unable to create dwc2 unaligned cache\n");
+	}
+
 	hsotg->otg_port = 1;
 	hsotg->frame_list = NULL;
 	hsotg->frame_list_dma = 0;
@@ -5280,8 +5357,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
 	return 0;
 
 error4:
-	kmem_cache_destroy(hsotg->desc_gen_cache);
+	kmem_cache_destroy(hsotg->unaligned_cache);
 	kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+	kmem_cache_destroy(hsotg->desc_gen_cache);
 error3:
 	dwc2_hcd_release(hsotg);
 error2:
@@ -5322,8 +5400,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
 	usb_remove_hcd(hcd);
 	hsotg->priv = NULL;
 
-	kmem_cache_destroy(hsotg->desc_gen_cache);
+	kmem_cache_destroy(hsotg->unaligned_cache);
 	kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+	kmem_cache_destroy(hsotg->desc_gen_cache);
 
 	dwc2_hcd_release(hsotg);
 	usb_put_hcd(hcd);
@@ -5435,7 +5514,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
 	dwc2_writel(hprt0, hsotg->regs + HPRT0);
 
 	/* Wait for the HPRT0.PrtSusp register field to be set */
-	if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300))
+	if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
 		dev_warn(hsotg->dev, "Suspend wasn't generated\n");
 
 	/*
@@ -5616,6 +5695,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
 		return ret;
 	}
 
+	dwc2_hcd_rem_wakeup(hsotg);
+
 	hsotg->hibernated = 0;
 	hsotg->bus_suspended = 0;
 	hsotg->lx_state = DWC2_L0;
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7db1ee7e7a77..5502a501f516 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -76,6 +76,8 @@ struct dwc2_qh;
  *                      (micro)frame
  * @xfer_buf:           Pointer to current transfer buffer position
  * @xfer_dma:           DMA address of xfer_buf
+ * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
+ *                      DWORD aligned
  * @xfer_len:           Total number of bytes to transfer
  * @xfer_count:         Number of bytes transferred so far
  * @start_pkt_count:    Packet count at start of transfer
@@ -133,6 +135,7 @@ struct dwc2_host_chan {
 
 	u8 *xfer_buf;
 	dma_addr_t xfer_dma;
+	dma_addr_t align_buf;
 	u32 xfer_len;
 	u32 xfer_count;
 	u16 start_pkt_count;
@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
  *                      speed. Note that this is in "schedule slice" which
  *                      is tightly packed.
  * @ntd:                Actual number of transfer descriptors in a list
+ * @dw_align_buf:       Used instead of original buffer if its physical address
+ *                      is not dword-aligned
+ * @dw_align_buf_dma:   DMA address for dw_align_buf
  * @qtd_list:           List of QTDs for this QH
  * @channel:            Host channel currently processing transfers for this QH
  * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
@@ -350,6 +356,8 @@ struct dwc2_qh {
 	struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
 	u32 ls_start_schedule_slice;
 	u16 ntd;
+	u8 *dw_align_buf;
+	dma_addr_t dw_align_buf_dma;
 	struct list_head qtd_list;
 	struct dwc2_host_chan *channel;
 	struct list_head qh_list_entry;
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index fbea5e3fb947..ed7f05cf4906 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -942,14 +942,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
 	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
 					  DWC2_HC_XFER_COMPLETE, NULL);
-	if (!len) {
+	if (!len && !qtd->isoc_split_offset) {
 		qtd->complete_split = 0;
-		qtd->isoc_split_offset = 0;
 		return 0;
 	}
 
 	frame_desc->actual_length += len;
 
+	if (chan->align_buf) {
+		dev_vdbg(hsotg->dev, "non-aligned buffer\n");
+		dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
+				 DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
+		memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
+		       chan->qh->dw_align_buf, len);
+	}
+
 	qtd->isoc_split_offset += len;
 
 	hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index d7c3d6c776d8..301ced1618f8 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
 	/* Get the map and adjust if this is a multi_tt hub */
 	map = qh->dwc_tt->periodic_bitmaps;
 	if (qh->dwc_tt->usb_tt->multi)
-		map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+		map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
 
 	return map;
 }
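
The one-line fix above exists because USB hub (TT) port numbers count from 1 while the per-port bitmap array is indexed from 0; the unadjusted multiply skipped the first map and ran one element past the last. A sketch of the corrected indexing, with hypothetical names:

	/* Sketch: ttport is 1-based, bitmap slots are 0-based. */
	static unsigned long *tt_map_for_port(unsigned long *maps,
					      unsigned int elems_per_map,
					      unsigned int ttport)
	{
		return maps + elems_per_map * (ttport - 1);
	}
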
@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 
 	if (qh->desc_list)
 		dwc2_hcd_qh_free_ddma(hsotg, qh);
+	else if (hsotg->unaligned_cache && qh->dw_align_buf)
+		kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
+
 	kfree(qh);
 }
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ea91310113b9..103807587dc6 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1272,7 +1272,6 @@ static int dwc3_probe(struct platform_device *pdev)
 	if (!dwc->clks)
 		return -ENOMEM;
 
-	dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
 	dwc->dev = dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1307,15 +1306,19 @@ static int dwc3_probe(struct platform_device *pdev)
 	if (IS_ERR(dwc->reset))
 		return PTR_ERR(dwc->reset);
 
-	ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
-	if (ret == -EPROBE_DEFER)
-		return ret;
-	/*
-	 * Clocks are optional, but new DT platforms should support all clocks
-	 * as required by the DT-binding.
-	 */
-	if (ret)
-		dwc->num_clks = 0;
+	if (dev->of_node) {
+		dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
+
+		ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+		if (ret == -EPROBE_DEFER)
+			return ret;
+		/*
+		 * Clocks are optional, but new DT platforms should support all
+		 * clocks as required by the DT-binding.
+		 */
+		if (ret)
+			dwc->num_clks = 0;
+	}
 
 	ret = reset_control_deassert(dwc->reset);
 	if (ret)
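
The probe rework above only touches the clock bulk API when the device has an OF node, since ACPI/PCI-instantiated dwc3 cores carry no clock bindings; missing clocks stay optional, but probe deferral is still honoured. A reduced sketch of the pattern, assuming the caller sized the clk_bulk_data array beforehand:

	#include <linux/clk.h>
	#include <linux/of.h>

	/* Sketch: optional DT clocks with -EPROBE_DEFER passed through. */
	static int get_optional_dt_clocks(struct device *dev,
					  struct clk_bulk_data *clks, int *num)
	{
		int ret;

		if (!dev->of_node) {
			*num = 0;	/* non-DT instantiation: no bindings */
			return 0;
		}

		ret = clk_bulk_get(dev, *num, clks);
		if (ret == -EPROBE_DEFER)
			return ret;
		if (ret)
			*num = 0;	/* clocks are optional */
		return 0;
	}
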
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 6b3ccd542bd7..dbeff5e6ad14 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -165,8 +165,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
 
 	reset_control_put(simple->resets);
 
-	pm_runtime_put_sync(dev);
 	pm_runtime_disable(dev);
+	pm_runtime_put_noidle(dev);
+	pm_runtime_set_suspended(dev);
 
 	return 0;
 }
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index c961a94d136b..f57e7c94b8e5 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,7 @@
 #define PCI_DEVICE_ID_INTEL_GLK			0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP		0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH		0xa36e
+#define PCI_DEVICE_ID_INTEL_ICLLP		0x34ee
 
 #define PCI_INTEL_BXT_DSM_GUID	"732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR	4
@@ -289,6 +290,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
 	{  }	/* Terminating Entry */
 };
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index b0e67ab2f98c..a6d0203e40b6 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
 	qcom->dwc3 = of_find_device_by_node(dwc3_np);
 	if (!qcom->dwc3) {
 		dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+		ret = -ENODEV;
 		goto depopulate;
 	}
 
@@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_qcom_pm_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
 {
 	struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 	int ret = 0;
@@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev)
 	return ret;
 }
 
-static int dwc3_qcom_pm_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
 {
 	struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 	int ret;
@@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev)
 
 	return ret;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int dwc3_qcom_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
 {
 	struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
 	return dwc3_qcom_suspend(qcom);
 }
 
-static int dwc3_qcom_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
 {
 	struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
 	return dwc3_qcom_resume(qcom);
 }
-#endif
 
 static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
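
Replacing the #ifdef CONFIG_PM/#ifdef CONFIG_PM_SLEEP blocks with __maybe_unused keeps the callbacks compiled and type-checked in every configuration, while the SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS() macros still discard them when the corresponding PM support is off. A minimal sketch of the idiom for a hypothetical driver:

	#include <linux/pm.h>

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		return 0;	/* quiesce the hardware here */
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		return 0;	/* bring the hardware back here */
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	};
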
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f242c2bcea81..d2fa071c21b1 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
 		 */
 		if (w_value && !f->get_alt)
 			break;
+
+		spin_lock(&cdev->lock);
 		value = f->set_alt(f, w_index, w_value);
 		if (value == USB_GADGET_DELAYED_STATUS) {
 			DBG(cdev,
@@ -1728,6 +1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
 			DBG(cdev, "delayed_status count %d\n",
 					cdev->delayed_status);
 		}
+		spin_unlock(&cdev->lock);
 		break;
 	case USB_REQ_GET_INTERFACE:
 		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index dce9d12c7981..33e2030503fa 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -215,6 +215,7 @@ struct ffs_io_data {
 
 	struct mm_struct *mm;
 	struct work_struct work;
+	struct work_struct cancellation_work;
 
 	struct usb_ep *ep;
 	struct usb_request *req;
@@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static void ffs_aio_cancel_worker(struct work_struct *work)
+{
+	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+						   cancellation_work);
+
+	ENTER();
+
+	usb_ep_dequeue(io_data->ep, io_data->req);
+}
+
 static int ffs_aio_cancel(struct kiocb *kiocb)
 {
 	struct ffs_io_data *io_data = kiocb->private;
-	struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+	struct ffs_data *ffs = io_data->ffs;
 	int value;
 
 	ENTER();
 
-	spin_lock_irq(&epfile->ffs->eps_lock);
-
-	if (likely(io_data && io_data->ep && io_data->req))
-		value = usb_ep_dequeue(io_data->ep, io_data->req);
-	else
-		value = -EINVAL;
-
-	spin_unlock_irq(&epfile->ffs->eps_lock);
+	if (likely(io_data && io_data->ep && io_data->req)) {
+		INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
+		queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
+		value = -EINPROGRESS;
+	} else {
+		value = -EINVAL;
+	}
 
 	return value;
 }
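
The rework above stops calling usb_ep_dequeue() under the eps_lock spinlock and instead bounces the dequeue to the function's io_completion_wq, returning -EINPROGRESS to the AIO layer so the result is reported when the worker runs. A stripped-down sketch of the defer-to-worker idiom, with hypothetical names:

	#include <linux/workqueue.h>

	struct cancel_ctx {
		struct work_struct work;
		/* plus whatever the worker needs (ep, request, ...) */
	};

	static void cancel_worker(struct work_struct *work)
	{
		struct cancel_ctx *ctx =
			container_of(work, struct cancel_ctx, work);

		/* Sleeping cleanup runs here, outside atomic context. */
		(void)ctx;
	}

	static int cancel_async(struct workqueue_struct *wq,
				struct cancel_ctx *ctx)
	{
		INIT_WORK(&ctx->work, cancel_worker);
		queue_work(wq, &ctx->work);
		return -EINPROGRESS;	/* result reported asynchronously */
	}
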
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
index f0cdf89b8503..83ba8a2eb6af 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
+++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
@@ -2,6 +2,7 @@
 config USB_ASPEED_VHUB
 	tristate "Aspeed vHub UDC driver"
 	depends on ARCH_ASPEED || COMPILE_TEST
+	depends on USB_LIBCOMPOSITE
 	help
 	  USB peripheral controller for the Aspeed AST2500 family
 	  SoCs supporting the "vHub" functionality and USB2.0
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 1fbfd89d0a0f..387f124a8334 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci)
 	return 0;
 }
 
-static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
+static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
 {
 	struct xhci_dbc *dbc = xhci->dbc;
 
 	if (dbc->state == DS_DISABLED)
-		return;
+		return -1;
 
 	writel(0, &dbc->regs->control);
 	xhci_dbc_mem_cleanup(xhci);
 	dbc->state = DS_DISABLED;
+
+	return 0;
 }
 
 static int xhci_dbc_start(struct xhci_hcd *xhci)
@@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
 
 static void xhci_dbc_stop(struct xhci_hcd *xhci)
 {
+	int ret;
 	unsigned long flags;
 	struct xhci_dbc *dbc = xhci->dbc;
 	struct dbc_port *port = &dbc->port;
@@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
 	xhci_dbc_tty_unregister_device(xhci);
 
 	spin_lock_irqsave(&dbc->lock, flags);
-	xhci_do_dbc_stop(xhci);
+	ret = xhci_do_dbc_stop(xhci);
 	spin_unlock_irqrestore(&dbc->lock, flags);
 
-	pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+	if (!ret)
+		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
 }
 
 static void
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index acbd3d7b8828..ef350c33dc4a 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
 	if (!ep->stream_info)
 		return NULL;
 
-	if (stream_id > ep->stream_info->num_streams)
+	if (stream_id >= ep->stream_info->num_streams)
 		return NULL;
 	return ep->stream_info->stream_rings[stream_id];
 }
@@ -886,12 +886,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 
 	dev = xhci->devs[slot_id];
 
-	trace_xhci_free_virt_device(dev);
-
 	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
 	if (!dev)
 		return;
 
+	trace_xhci_free_virt_device(dev);
+
 	if (dev->tt_info)
 		old_active_eps = dev->tt_info->active_eps;
 
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index a8c1d073cba0..4b463e5202a4 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -481,7 +481,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
 	unsigned long mask;
 	unsigned int port;
 	bool idle, enable;
-	int err;
+	int err = 0;
 
 	memset(&rsp, 0, sizeof(rsp));
 
@@ -1223,10 +1223,10 @@ disable_rpm:
 	pm_runtime_disable(&pdev->dev);
 	usb_put_hcd(tegra->hcd);
 disable_xusbc:
-	if (!&pdev->dev.pm_domain)
+	if (!pdev->dev.pm_domain)
 		tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
 disable_xusba:
-	if (!&pdev->dev.pm_domain)
+	if (!pdev->dev.pm_domain)
 		tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
 put_padctl:
 	tegra_xusb_padctl_put(tegra->padctl);
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 410544ffe78f..88b427434bd8 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
 	TP_ARGS(ring, trb)
 );
 
+DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
+	TP_PROTO(struct xhci_virt_device *vdev),
+	TP_ARGS(vdev),
+	TP_STRUCT__entry(
+		__field(void *, vdev)
+		__field(unsigned long long, out_ctx)
+		__field(unsigned long long, in_ctx)
+		__field(u8, fake_port)
+		__field(u8, real_port)
+		__field(u16, current_mel)
+
+	),
+	TP_fast_assign(
+		__entry->vdev = vdev;
+		__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
+		__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
+		__entry->fake_port = (u8) vdev->fake_port;
+		__entry->real_port = (u8) vdev->real_port;
+		__entry->current_mel = (u16) vdev->current_mel;
+	),
+	TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+		__entry->vdev, __entry->in_ctx, __entry->out_ctx,
+		__entry->fake_port, __entry->real_port, __entry->current_mel
+	)
+);
+
+DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
+	TP_PROTO(struct xhci_virt_device *vdev),
+	TP_ARGS(vdev)
+);
+
 DECLARE_EVENT_CLASS(xhci_log_virt_dev,
 	TP_PROTO(struct xhci_virt_device *vdev),
 	TP_ARGS(vdev),
@@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
 	TP_ARGS(vdev)
 );
 
-DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
-	TP_PROTO(struct xhci_virt_device *vdev),
-	TP_ARGS(vdev)
-);
-
 DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
 	TP_PROTO(struct xhci_virt_device *vdev),
 	TP_ARGS(vdev)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8c8da2d657fa..2f4850f25e82 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -908,6 +908,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
 	spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+	struct xhci_port **ports;
+	int port_index;
+	u32 status;
+	u32 portsc;
+
+	status = readl(&xhci->op_regs->status);
+	if (status & STS_EINT)
+		return true;
+	/*
+	 * Checking STS_EINT is not enough as there is a lag between a change
+	 * bit being set and the Port Status Change Event that it generated
+	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+	 */
+
+	port_index = xhci->usb2_rhub.num_ports;
+	ports = xhci->usb2_rhub.ports;
+	while (port_index--) {
+		portsc = readl(ports[port_index]->addr);
+		if (portsc & PORT_CHANGE_MASK ||
+		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+			return true;
+	}
+	port_index = xhci->usb3_rhub.num_ports;
+	ports = xhci->usb3_rhub.ports;
+	while (port_index--) {
+		portsc = readl(ports[port_index]->addr);
+		if (portsc & PORT_CHANGE_MASK ||
+		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+			return true;
+	}
+	return false;
+}
+
 /*
  * Stop HC (not bus-specific)
  *
@@ -1009,7 +1044,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
  */
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
-	u32 command, temp = 0, status;
+	u32 command, temp = 0;
 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
 	struct usb_hcd *secondary_hcd;
 	int retval = 0;
@@ -1043,8 +1078,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		command = readl(&xhci->op_regs->command);
 		command |= CMD_CRS;
 		writel(command, &xhci->op_regs->command);
+		/*
+		 * Some controllers take up to 55+ ms to complete the controller
+		 * restore so setting the timeout to 100ms. Xhci specification
+		 * doesn't mention any timeout value.
+		 */
 		if (xhci_handshake(&xhci->op_regs->status,
-			      STS_RESTORE, 0, 10 * 1000)) {
+			      STS_RESTORE, 0, 100 * 1000)) {
 			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
 			spin_unlock_irq(&xhci->lock);
 			return -ETIMEDOUT;
@@ -1134,8 +1174,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
  done:
 	if (retval == 0) {
 		/* Resume root hubs only when have pending events. */
-		status = readl(&xhci->op_regs->status);
-		if (status & STS_EINT) {
+		if (xhci_pending_portevent(xhci)) {
 			usb_hcd_resume_root_hub(xhci->shared_hcd);
 			usb_hcd_resume_root_hub(hcd);
 		}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 939e2f86b595..841e89ffe2e9 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -382,6 +382,10 @@ struct xhci_op_regs {
 #define PORT_PLC	(1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC	(1 << 23)
+#define PORT_CHANGE_MASK	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+				 PORT_RC | PORT_PLC | PORT_CEC)
+
+
 /* Cold Attach Status - xHC can set this bit to report device attached during
  * Sx state. Warm port reset should be perfomed to clear this bit and move port
  * to connected state.
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 8abb6cbbd98a..3be40eaa1ac9 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 			  loff_t *ppos)
 {
 	struct usb_yurex *dev;
-	int retval = 0;
-	int bytes_read = 0;
+	int len = 0;
 	char in_buffer[20];
 	unsigned long flags;
 
@@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 
 	mutex_lock(&dev->io_mutex);
 	if (!dev->interface) {		/* already disconnected */
-		retval = -ENODEV;
-		goto exit;
+		mutex_unlock(&dev->io_mutex);
+		return -ENODEV;
 	}
 
 	spin_lock_irqsave(&dev->lock, flags);
-	bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+	len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
 	spin_unlock_irqrestore(&dev->lock, flags);
-
-	if (*ppos < bytes_read) {
-		if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
-			retval = -EFAULT;
-		else {
-			retval = bytes_read - *ppos;
-			*ppos += bytes_read;
-		}
-	}
-
-exit:
 	mutex_unlock(&dev->io_mutex);
-	return retval;
+
+	return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
 static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
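
The rewritten read() above delegates offset validation, the user copy and the *ppos update to simple_read_from_buffer(), removing the broken hand-rolled bookkeeping. A sketch of an equivalent read, with a hypothetical constant in place of dev->bbu:

	#include <linux/kernel.h>
	#include <linux/fs.h>

	static ssize_t foo_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
	{
		char tmp[20];
		int len;

		/* Format into a kernel buffer, let the helper do the rest. */
		len = scnprintf(tmp, sizeof(tmp), "%lld\n", 42LL);
		return simple_read_from_buffer(buf, count, ppos, tmp, len);
	}
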
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index bdd7a5ad3bf1..3bb1fff02bed 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev,
 	r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
 			    value, index, buf, bufsize, DEFAULT_TIMEOUT);
-	if (r < bufsize) {
+	if (r < (int)bufsize) {
 		if (r >= 0) {
 			dev_err(&dev->dev,
 				"short control message received (%d < %u)\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index eb6c26cbe579..626a29d9aa58 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
 	{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
 	{ USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+	{ USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
+	{ USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
+	{ USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
 	{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
 	{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
 	{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+	{ USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+	{ USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+	{ USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
 	{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
@@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
 	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+	{ USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
 	{ USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
+	{ USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -134,17 +142,24 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8857) },	/* CEL EM357 ZigBee USB Stick */
 	{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
 	{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+	{ USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
+	{ USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
 	{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
 	{ USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+	{ USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+	{ USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
 	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
 	{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+	{ USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+	{ USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
+	{ USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
 	{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
 	{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
 	{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 5169624d8b11..38d43c4b7ce5 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
 			     3, /* get pins */
 			     USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
 			     0, 0, data, 1, 2000);
-	if (rc >= 0)
+	if (rc == 1)
 		*value = *data;
+	else if (rc >= 0)
+		rc = -EIO;
 
 	kfree(data);
 	return rc;
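
usb_control_msg() returns the number of bytes actually transferred, so a non-negative short return is not a successful one-byte read; the change above turns such returns into -EIO. A sketch of the same check, assuming the caller passes a kmalloc'd (DMA-safe) one-byte buffer and a hypothetical vendor request:

	#include <linux/usb.h>

	/* Sketch: accept only a complete 1-byte control IN transfer. */
	static int read_one_byte(struct usb_device *udev, u8 request, u8 *buf)
	{
		int rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
					 USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
					 USB_DIR_IN, 0, 0, buf, 1, 2000);

		if (rc == 1)
			return 0;
		return rc < 0 ? rc : -EIO;
	}
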
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index fdceb46d9fc6..b580b4c7fa48 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb)
 	}
 
 	dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
+	if (urb->actual_length < 1)
+		goto out;
+
 	dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
 		mos7840_port->MsrLsr, mos7840_port->port_num);
 	data = urb->transfer_buffer;
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 8a201dd53d36..150f43668bec 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -418,17 +418,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
 	u64 ts_nsec = local_clock();
 	unsigned long rem_nsec;
 
+	mutex_lock(&port->logbuffer_lock);
 	if (!port->logbuffer[port->logbuffer_head]) {
 		port->logbuffer[port->logbuffer_head] =
 			kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
-		if (!port->logbuffer[port->logbuffer_head])
+		if (!port->logbuffer[port->logbuffer_head]) {
+			mutex_unlock(&port->logbuffer_lock);
 			return;
+		}
 	}
 
 	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
 
-	mutex_lock(&port->logbuffer_lock);
-
 	if (tcpm_log_full(port)) {
 		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
 		strcpy(tmpbuffer, "overflow");
@@ -724,6 +725,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
 
 	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
 
+	port->supply_voltage = mv;
+	port->current_limit = max_ma;
+
 	if (port->tcpc->set_current_limit)
 		ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
 
@@ -2594,8 +2598,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
 	tcpm_set_attached_state(port, false);
 	port->try_src_count = 0;
 	port->try_snk_count = 0;
-	port->supply_voltage = 0;
-	port->current_limit = 0;
 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
 
 	power_supply_changed(port->psy);
@@ -3043,7 +3045,8 @@ static void run_state_machine(struct tcpm_port *port)
 		    tcpm_port_is_sink(port) &&
 		    time_is_after_jiffies(port->delayed_runtime)) {
 			tcpm_set_state(port, SNK_DISCOVERY,
-				       port->delayed_runtime - jiffies);
+				       jiffies_to_msecs(port->delayed_runtime -
+							jiffies));
 			break;
 		}
 		tcpm_set_state(port, unattached_state(port), 0);
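
tcpm_set_state() takes its delay in milliseconds, whereas delayed_runtime is a jiffies deadline, so passing the raw difference scaled the timeout by HZ/1000. A sketch of the unit conversion, clamping deadlines that have already passed:

	#include <linux/jiffies.h>

	/* Sketch: ms remaining until a jiffies deadline, 0 if expired. */
	static unsigned int msecs_until(unsigned long deadline)
	{
		if (time_is_before_eq_jiffies(deadline))
			return 0;
		return jiffies_to_msecs(deadline - jiffies);
	}
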
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index bd5cca5632b3..8d0a6fe748bd 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work)
 	}
 
 	if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
+		typec_set_pwr_role(con->port, con->status.pwr_dir);
+
+		switch (con->status.partner_type) {
+		case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+			typec_set_data_role(con->port, TYPEC_HOST);
+			break;
+		case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+			typec_set_data_role(con->port, TYPEC_DEVICE);
+			break;
+		default:
+			break;
+		}
+
 		if (con->status.connected)
 			ucsi_register_partner(con);
 		else
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 44eb4e1ea817..a18112a83fae 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
+	/* This will make sure we can use ioremap_nocache() */
+	status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
+	if (ACPI_FAILURE(status))
+		return -ENOMEM;
+
 	/*
 	 * NOTE: The memory region for the data structures is used also in an
 	 * operation region, which means ACPI has already reserved it. Therefore
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 24ee2605b9f0..42dc1d3d71cf 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -28,5 +28,13 @@ config VFIO_PCI_INTX
 	def_bool y if !S390
 
 config VFIO_PCI_IGD
-	depends on VFIO_PCI
-	def_bool y if X86
+	bool "VFIO PCI extensions for Intel graphics (GVT-d)"
+	depends on VFIO_PCI && X86
+	default y
+	help
+	  Support for Intel IGD specific extensions to enable direct
+	  assignment to virtual machines. This includes exposing an IGD
+	  specific firmware table and read-only copies of the host bridge
+	  and LPC bridge config space.
+
+	  To enable Intel IGD assignment through vfio-pci, say Y.
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2c75b33db4ac..3e5b17710a4f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 	struct page *page[1];
 	struct vm_area_struct *vma;
 	struct vm_area_struct *vmas[1];
+	unsigned int flags = 0;
 	int ret;
 
+	if (prot & IOMMU_WRITE)
+		flags |= FOLL_WRITE;
+
+	down_read(&mm->mmap_sem);
 	if (mm == current->mm) {
-		ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
-					      page, vmas);
+		ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
 	} else {
-		unsigned int flags = 0;
-
-		if (prot & IOMMU_WRITE)
-			flags |= FOLL_WRITE;
-
-		down_read(&mm->mmap_sem);
 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
 					    vmas, NULL);
 		/*
@@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 			ret = -EOPNOTSUPP;
 			put_page(page[0]);
 		}
-		up_read(&mm->mmap_sem);
 	}
+	up_read(&mm->mmap_sem);
 
 	if (ret == 1) {
 		*pfn = page_to_pfn(page[0]);
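
With the cleanup above, both paths derive a single FOLL_* flags word up front and share one mmap_sem critical section, since get_user_pages_longterm() takes gup_flags rather than a write/force pair. A sketch of the flags derivation, assuming IOMMU_WRITE from <linux/iommu.h>:

	#include <linux/mm.h>
	#include <linux/iommu.h>

	/* Sketch: translate an IOMMU protection flag into GUP flags. */
	static unsigned int gup_flags_for_prot(int prot)
	{
		unsigned int flags = 0;

		if (prot & IOMMU_WRITE)
			flags |= FOLL_WRITE;
		return flags;
	}
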
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 686dc670fd29..29756d88799b 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1226,7 +1226,8 @@ err_used:
 	if (ubufs)
 		vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-	sockfd_put(sock);
+	if (sock)
+		sockfd_put(sock);
 err_vq:
 	mutex_unlock(&vq->mutex);
 err:
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 451e833f5931..48b154276179 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -41,4 +41,4 @@ obj-$(CONFIG_XEN_PVCALLS_FRONTEND) += pvcalls-front.o
 xen-evtchn-y				:= evtchn.o
 xen-gntdev-y				:= gntdev.o
 xen-gntalloc-y				:= gntalloc.o
-xen-privcmd-y				:= privcmd.o
+xen-privcmd-y				:= privcmd.o privcmd-buf.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 762378f1811c..08e4af04d6f2 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq)
 		xen_irq_info_cleanup(info);
 	}
 
-	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
 	xen_free_irq(irq);
 }
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 2473b0a9e6e4..ba9f3eec2bd0 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -799,7 +799,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
 
 	return 0;
 }
-EXPORT_SYMBOL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
 
 /**
  * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
@@ -820,7 +820,7 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
 	}
 	free_xenballooned_pages(nr_pages, pages);
 }
-EXPORT_SYMBOL(gnttab_free_pages);
+EXPORT_SYMBOL_GPL(gnttab_free_pages);
 
 /* Handling of paged out grant targets (GNTST_eagain) */
 #define MAX_DELAY 256
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 8835065029d3..c93d8ef8df34 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -289,8 +289,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
 		return;
 	}
 
-	if (sysrq_key != '\0')
-		xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+	if (sysrq_key != '\0') {
+		err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+		if (err) {
+			pr_err("%s: Error %d writing sysrq in control/sysrq\n",
+			       __func__, err);
+			xenbus_transaction_end(xbt, 1);
+			return;
+		}
+	}
 
 	err = xenbus_transaction_end(xbt, 0);
 	if (err == -EAGAIN)
@@ -342,7 +349,12 @@ static int setup_shutdown_watcher(void)
 			continue;
 		snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
 			 shutdown_handlers[idx].command);
-		xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+		err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+		if (err) {
+			pr_err("%s: Error %d writing %s\n", __func__,
+			       err, node);
+			return err;
+		}
 	}
 
 	return 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
new file mode 100644
index 000000000000..df1ed37c3269
--- /dev/null
+++ b/drivers/xen/privcmd-buf.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/******************************************************************************
+ * privcmd-buf.c
+ *
+ * Mmap of hypercall buffers.
+ *
+ * Copyright (c) 2018 Juergen Gross
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
+static unsigned int limit = 64;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
+			"the privcmd-buf device per open file");
+
+struct privcmd_buf_private {
+	struct mutex lock;
+	struct list_head list;
+	unsigned int allocated;
+};
+
+struct privcmd_buf_vma_private {
+	struct privcmd_buf_private *file_priv;
+	struct list_head list;
+	unsigned int users;
+	unsigned int n_pages;
+	struct page *pages[];
+};
+
+static int privcmd_buf_open(struct inode *ino, struct file *file)
+{
+	struct privcmd_buf_private *file_priv;
+
+	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
+		return -ENOMEM;
+
+	mutex_init(&file_priv->lock);
+	INIT_LIST_HEAD(&file_priv->list);
+
+	file->private_data = file_priv;
+
+	return 0;
+}
+
+static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
+{
+	unsigned int i;
+
+	vma_priv->file_priv->allocated -= vma_priv->n_pages;
+
+	list_del(&vma_priv->list);
+
+	for (i = 0; i < vma_priv->n_pages; i++)
+		if (vma_priv->pages[i])
+			__free_page(vma_priv->pages[i]);
+
+	kfree(vma_priv);
+}
+
+static int privcmd_buf_release(struct inode *ino, struct file *file)
+{
+	struct privcmd_buf_private *file_priv = file->private_data;
+	struct privcmd_buf_vma_private *vma_priv;
+
+	mutex_lock(&file_priv->lock);
+
+	while (!list_empty(&file_priv->list)) {
+		vma_priv = list_first_entry(&file_priv->list,
+					    struct privcmd_buf_vma_private,
+					    list);
+		privcmd_buf_vmapriv_free(vma_priv);
+	}
+
+	mutex_unlock(&file_priv->lock);
+
+	kfree(file_priv);
+
+	return 0;
+}
+
+static void privcmd_buf_vma_open(struct vm_area_struct *vma)
+{
+	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+
+	if (!vma_priv)
+		return;
+
+	mutex_lock(&vma_priv->file_priv->lock);
+	vma_priv->users++;
+	mutex_unlock(&vma_priv->file_priv->lock);
+}
+
+static void privcmd_buf_vma_close(struct vm_area_struct *vma)
+{
+	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+	struct privcmd_buf_private *file_priv;
+
+	if (!vma_priv)
+		return;
+
+	file_priv = vma_priv->file_priv;
+
+	mutex_lock(&file_priv->lock);
+
+	vma_priv->users--;
+	if (!vma_priv->users)
+		privcmd_buf_vmapriv_free(vma_priv);
+
+	mutex_unlock(&file_priv->lock);
+}
+
+static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
+{
+	pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+		 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
+		 vmf->pgoff, (void *)vmf->address);
+
+	return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct privcmd_buf_vm_ops = {
+	.open = privcmd_buf_vma_open,
+	.close = privcmd_buf_vma_close,
+	.fault = privcmd_buf_vma_fault,
+};
+
+static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct privcmd_buf_private *file_priv = file->private_data;
+	struct privcmd_buf_vma_private *vma_priv;
+	unsigned long count = vma_pages(vma);
+	unsigned int i;
+	int ret = 0;
+
+	if (!(vma->vm_flags & VM_SHARED) || count > limit ||
+	    file_priv->allocated + count > limit)
+		return -EINVAL;
+
+	vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
+			   GFP_KERNEL);
+	if (!vma_priv)
+		return -ENOMEM;
+
+	vma_priv->n_pages = count;
+	count = 0;
+	for (i = 0; i < vma_priv->n_pages; i++) {
+		vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!vma_priv->pages[i])
+			break;
+		count++;
+	}
+
+	mutex_lock(&file_priv->lock);
+
+	file_priv->allocated += count;
+
+	vma_priv->file_priv = file_priv;
+	vma_priv->users = 1;
+
+	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+	vma->vm_ops = &privcmd_buf_vm_ops;
+	vma->vm_private_data = vma_priv;
+
+	list_add(&vma_priv->list, &file_priv->list);
+
+	if (vma_priv->n_pages != count)
+		ret = -ENOMEM;
+	else
+		for (i = 0; i < vma_priv->n_pages; i++) {
+			ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+					     vma_priv->pages[i]);
+			if (ret)
+				break;
+		}
+
+	if (ret)
+		privcmd_buf_vmapriv_free(vma_priv);
+
+	mutex_unlock(&file_priv->lock);
+
+	return ret;
+}
+
+const struct file_operations xen_privcmdbuf_fops = {
+	.owner = THIS_MODULE,
+	.open = privcmd_buf_open,
+	.release = privcmd_buf_release,
+	.mmap = privcmd_buf_mmap,
+};
+EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
+
+struct miscdevice xen_privcmdbuf_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "xen/hypercall",
+	.fops = &xen_privcmdbuf_fops,
+};
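
Note: a userspace sketch of how the new device is consumed (assumption: with devtmpfs the misc device named "xen/hypercall" surfaces as /dev/xen/hypercall; the path and the 4096-byte page size are illustrative). Mappings must be MAP_SHARED and stay within the per-file "limit" pages, or mmap() fails with EINVAL; the returned pages are zeroed, kernel-backed hypercall buffers:

/* Illustrative userspace consumer, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/xen/hypercall", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* One zeroed page, pre-inserted by privcmd_buf_mmap(); the fault
	 * handler only fires for accesses outside the inserted pages.
	 */
	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* ... hand buf to a privcmd ioctl as a hypercall buffer ... */

	munmap(buf, 4096);
	close(fd);
	return 0;
}
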
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8ae0349d9f0a..7e6e682104dc 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1007,12 +1007,21 @@ static int __init privcmd_init(void)
 		pr_err("Could not register Xen privcmd device\n");
 		return err;
 	}
+
+	err = misc_register(&xen_privcmdbuf_dev);
+	if (err != 0) {
+		pr_err("Could not register Xen hypercall-buf device\n");
+		misc_deregister(&privcmd_dev);
+		return err;
+	}
+
 	return 0;
 }
 
 static void __exit privcmd_exit(void)
 {
 	misc_deregister(&privcmd_dev);
+	misc_deregister(&xen_privcmdbuf_dev);
 }
 
 module_init(privcmd_init);
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h
index 14facaeed36f..0dd9f8f67ee3 100644
--- a/drivers/xen/privcmd.h
+++ b/drivers/xen/privcmd.h
@@ -1,3 +1,6 @@
 #include <linux/fs.h>
 
 extern const struct file_operations xen_privcmd_fops;
+extern const struct file_operations xen_privcmdbuf_fops;
+
+extern struct miscdevice xen_privcmdbuf_dev;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 7bc88fd43cfc..e2f3e8b0fba9 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
 {
 	struct v2p_entry *entry;
 	unsigned long flags;
+	int err;
 
 	if (try) {
 		spin_lock_irqsave(&info->v2p_lock, flags);
@@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
 			scsiback_del_translation_entry(info, vir);
 		}
 	} else if (!try) {
-		xenbus_printf(XBT_NIL, info->dev->nodename, state,
-			      "%d", XenbusStateClosed);
+		err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
+				    "%d", XenbusStateClosed);
+		if (err)
+			xenbus_dev_error(info->dev, err,
+					 "%s: writing %s", __func__, state);
 	}
 }
 
@@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
 		snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
 		val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
 		if (IS_ERR(val)) {
-			xenbus_printf(XBT_NIL, dev->nodename, state,
-				      "%d", XenbusStateClosed);
+			err = xenbus_printf(XBT_NIL, dev->nodename, state,
+					    "%d", XenbusStateClosed);
+			if (err)
+				xenbus_dev_error(info->dev, err,
+						 "%s: writing %s", __func__, state);
 			return;
 		}
 		strlcpy(phy, val, VSCSI_NAMELEN);
@@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
 		err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
 				   &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
 		if (XENBUS_EXIST_ERR(err)) {
-			err = xenbus_printf(XBT_NIL, dev->nodename, state,
-					    "%d", XenbusStateClosed);
+			err = xenbus_printf(XBT_NIL, dev->nodename, state,
+					    "%d", XenbusStateClosed);
+			if (err)
+				xenbus_dev_error(info->dev, err,
+						 "%s: writing %s", __func__, state);
 			return;
 		}
 
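
Note: the three xen-scsiback hunks above repeat one idiom: the result of the XenbusStateClosed write is now checked and reported via xenbus_dev_error(), which records the failure in the device's xenstore error node rather than dropping it. A sketch of that pattern factored into a helper (the helper is illustrative, not added by the patch):

/* Illustrative helper, not in the patch: write XenbusStateClosed and
 * surface a failed write through the device's error node.
 */
#include <xen/xenbus.h>

static void write_state_closed(struct xenbus_device *dev, const char *state)
{
	int err;

	err = xenbus_printf(XBT_NIL, dev->nodename, state,
			    "%d", XenbusStateClosed);
	if (err)
		xenbus_dev_error(dev, err, "%s: writing %s", __func__, state);
}
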