author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-03-20 06:27:18 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-03-20 06:27:18 -0400
commit	4958134df54c2c84e9c22ea042761d439164d26e
tree	503177afab11f7d25b12a84ce25b481d305c51ba /drivers
parent	c4f528795d1add8b63652673f7262729f679c6c1
parent	c698ca5278934c0ae32297a8725ced2e27585d7f

Merge 4.16-rc6 into tty-next

We want the serial/tty fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
 drivers/auxdisplay/img-ascii-lcd.c | 6
 drivers/auxdisplay/panel.c | 6
 drivers/block/amiflop.c | 2
 drivers/block/ataflop.c | 2
 drivers/block/brd.c | 2
 drivers/block/floppy.c | 2
 drivers/block/loop.c | 4
 drivers/block/nbd.c | 2
 drivers/block/pktcdvd.c | 2
 drivers/block/swim.c | 2
 drivers/block/xen-blkfront.c | 17
 drivers/block/z2ram.c | 2
 drivers/bluetooth/btusb.c | 25
 drivers/bluetooth/hci_bcm.c | 7
 drivers/bus/ti-sysc.c | 2
 drivers/char/tpm/st33zp24/st33zp24.c | 4
 drivers/char/tpm/tpm-interface.c | 4
 drivers/char/tpm/tpm2-cmd.c | 4
 drivers/char/tpm/tpm_i2c_infineon.c | 5
 drivers/char/tpm/tpm_i2c_nuvoton.c | 8
 drivers/char/tpm/tpm_tis_core.c | 5
 drivers/clocksource/Kconfig | 1
 drivers/clocksource/arc_timer.c | 11
 drivers/clocksource/fsl_ftm_timer.c | 2
 drivers/clocksource/mips-gic-timer.c | 2
 drivers/cpufreq/Kconfig.arm | 6
 drivers/cpufreq/s3c24xx-cpufreq.c | 8
 drivers/cpufreq/scpi-cpufreq.c | 16
 drivers/crypto/ccp/psp-dev.c | 8
 drivers/dax/super.c | 6
 drivers/dma/mv_xor_v2.c | 25
 drivers/dma/sh/rcar-dmac.c | 2
 drivers/edac/sb_edac.c | 2
 drivers/firmware/dcdbas.c | 2
 drivers/firmware/efi/libstub/tpm.c | 4
 drivers/gpio/gpio-rcar.c | 38
 drivers/gpio/gpiolib-of.c | 15
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 3
 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 31
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 26
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 6
 drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 13
 drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 4
 drivers/gpu/drm/amd/amdgpu/atombios_encoders.h | 5
 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 8
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 8
 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 10
 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 8
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 30
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 18
 drivers/gpu/drm/amd/amdgpu/si.c | 22
 drivers/gpu/drm/amd/amdgpu/si_dpm.c | 50
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 169
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 6
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 6
 drivers/gpu/drm/amd/display/dc/core/dc.c | 6
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 3
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3
 drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 82
 drivers/gpu/drm/amd/display/dc/dc.h | 3
 drivers/gpu/drm/amd/display/dc/dc_stream.h | 2
 drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 10
 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 38
 drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 3
 drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 1
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 91
 drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 18
 drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 2
 drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 2
 drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 1
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 65
 drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 3
 drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 3
 drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c | 2
 drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c | 3
 drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 5
 drivers/gpu/drm/amd/display/include/signal_types.h | 5
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 18
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 11
 drivers/gpu/drm/drm_framebuffer.c | 4
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 8
 drivers/gpu/drm/i915/gvt/mmio_context.c | 2
 drivers/gpu/drm/i915/gvt/scheduler.c | 71
 drivers/gpu/drm/i915/gvt/scheduler.h | 5
 drivers/gpu/drm/i915/i915_gem.c | 22
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
 drivers/gpu/drm/i915/i915_gem_request.c | 4
 drivers/gpu/drm/i915/i915_perf.c | 40
 drivers/gpu/drm/i915/i915_reg.h | 4
 drivers/gpu/drm/i915/i915_sysfs.c | 10
 drivers/gpu/drm/i915/intel_audio.c | 6
 drivers/gpu/drm/i915/intel_dp.c | 10
 drivers/gpu/drm/i915/intel_lrc.c | 5
 drivers/gpu/drm/nouveau/nouveau_backlight.c | 14
 drivers/gpu/drm/nouveau/nv50_display.c | 1
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 2
 drivers/gpu/drm/radeon/cik.c | 31
 drivers/gpu/drm/radeon/radeon_device.c | 4
 drivers/gpu/drm/radeon/radeon_gem.c | 2
 drivers/gpu/drm/radeon/radeon_object.c | 2
 drivers/gpu/drm/radeon/radeon_pm.c | 6
 drivers/gpu/drm/sun4i/sun4i_crtc.c | 4
 drivers/gpu/drm/sun4i/sun4i_dotclock.c | 5
 drivers/gpu/drm/sun4i/sun4i_rgb.c | 2
 drivers/gpu/drm/sun4i/sun4i_tcon.c | 99
 drivers/gpu/drm/sun4i/sun4i_tcon.h | 1
 drivers/gpu/drm/virtio/virtgpu_ioctl.c | 17
 drivers/i2c/busses/i2c-octeon-core.c | 1
 drivers/i2c/busses/i2c-octeon-core.h | 2
 drivers/ide/ide-probe.c | 2
 drivers/infiniband/core/addr.c | 15
 drivers/infiniband/core/cq.c | 21
 drivers/infiniband/core/device.c | 6
 drivers/infiniband/core/sa_query.c | 7
 drivers/infiniband/core/ucma.c | 6
 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 26
 drivers/infiniband/hw/bnxt_re/ib_verbs.h | 3
 drivers/infiniband/hw/bnxt_re/main.c | 12
 drivers/infiniband/hw/bnxt_re/qplib_fp.c | 109
 drivers/infiniband/hw/bnxt_re/qplib_fp.h | 12
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 9
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 1
 drivers/infiniband/hw/bnxt_re/qplib_sp.c | 3
 drivers/infiniband/hw/bnxt_re/roce_hsi.h | 25
 drivers/infiniband/hw/mlx4/cq.c | 4
 drivers/infiniband/hw/mlx4/main.c | 11
 drivers/infiniband/hw/mlx5/cq.c | 10
 drivers/infiniband/hw/mlx5/main.c | 21
 drivers/infiniband/hw/mlx5/mr.c | 2
 drivers/infiniband/hw/mlx5/qp.c | 11
 drivers/infiniband/hw/qedr/qedr_iw_cm.c | 19
 drivers/infiniband/hw/qedr/verbs.c | 13
 drivers/infiniband/sw/rdmavt/mr.c | 10
 drivers/input/keyboard/matrix_keypad.c | 4
 drivers/input/mouse/synaptics.c | 1
 drivers/input/touchscreen/mms114.c | 15
 drivers/irqchip/irq-gic-v3-its.c | 13
 drivers/irqchip/irq-imx-gpcv2.c | 14
 drivers/md/bcache/request.c | 2
 drivers/md/bcache/super.c | 29
 drivers/md/dm-bufio.c | 16
 drivers/md/dm-mpath.c | 77
 drivers/md/dm-raid.c | 7
 drivers/md/dm-table.c | 16
 drivers/md/dm.c | 35
 drivers/md/md-multipath.c | 2
 drivers/md/md.c | 53
 drivers/md/md.h | 2
 drivers/md/raid1.c | 11
 drivers/md/raid1.h | 12
 drivers/md/raid10.c | 18
 drivers/md/raid10.h | 13
 drivers/md/raid5-log.h | 3
 drivers/md/raid5-ppl.c | 10
 drivers/md/raid5.c | 19
 drivers/md/raid5.h | 12
 drivers/media/Kconfig | 2
 drivers/media/common/videobuf2/Kconfig | 3
 drivers/media/common/videobuf2/Makefile | 9
 drivers/media/common/videobuf2/vb2-trace.c (renamed from drivers/media/v4l2-core/vb2-trace.c) | 0
 drivers/media/dvb-core/Makefile | 2
 drivers/media/dvb-core/dmxdev.c | 115
 drivers/media/dvb-core/dvb_demux.c | 112
 drivers/media/dvb-core/dvb_net.c | 5
 drivers/media/dvb-core/dvb_vb2.c | 31
 drivers/media/dvb-frontends/m88ds3103.c | 7
 drivers/media/i2c/tvp5150.c | 88
 drivers/media/pci/ttpci/av7110.c | 5
 drivers/media/pci/ttpci/av7110_av.c | 6
 drivers/media/usb/au0828/Kconfig | 2
 drivers/media/usb/ttusb-dec/ttusb_dec.c | 10
 drivers/media/v4l2-core/Kconfig | 1
 drivers/media/v4l2-core/Makefile | 3
 drivers/memory/brcmstb_dpfe.c | 74
 drivers/misc/ocxl/file.c | 27
 drivers/mmc/core/mmc_ops.c | 4
 drivers/mmc/host/dw_mmc-exynos.c | 1
 drivers/mmc/host/dw_mmc-k3.c | 4
 drivers/mmc/host/dw_mmc-rockchip.c | 1
 drivers/mmc/host/dw_mmc-zx.c | 1
 drivers/mmc/host/dw_mmc.c | 84
 drivers/mmc/host/dw_mmc.h | 2
 drivers/mmc/host/sdhci-pci-core.c | 35
 drivers/net/ethernet/freescale/gianfar.c | 7
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8
 drivers/net/ethernet/mellanox/mlx5/core/health.c | 2
 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 20
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 83
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1
 drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 2
 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 29
 drivers/net/ethernet/renesas/sh_eth.c | 11
 drivers/net/ethernet/renesas/sh_eth.h | 11
 drivers/net/hyperv/netvsc.c | 33
 drivers/net/hyperv/netvsc_drv.c | 62
 drivers/net/hyperv/rndis_filter.c | 23
 drivers/net/phy/phy.c | 2
 drivers/net/phy/phy_device.c | 18
 drivers/net/ppp/ppp_generic.c | 9
 drivers/net/tun.c | 22
 drivers/net/usb/cdc_ether.c | 6
 drivers/net/usb/r8152.c | 2
 drivers/net/virtio_net.c | 62
 drivers/net/wan/hdlc_ppp.c | 5
 drivers/net/xen-netfront.c | 7
 drivers/nvdimm/pmem.c | 3
 drivers/nvme/host/core.c | 14
 drivers/nvme/host/fabrics.c | 7
 drivers/nvme/host/fc.c | 27
 drivers/nvme/host/multipath.c | 43
 drivers/nvme/host/nvme.h | 8
 drivers/nvme/host/pci.c | 20
 drivers/nvme/host/rdma.c | 4
 drivers/nvme/target/core.c | 9
 drivers/nvme/target/loop.c | 4
 drivers/pci/dwc/pcie-designware-host.c | 2
 drivers/pci/setup-res.c | 4
 drivers/perf/arm_pmu.c | 2
 drivers/phy/qualcomm/phy-qcom-ufs.c | 5
 drivers/pinctrl/meson/pinctrl-meson-axg.c | 4
 drivers/platform/chrome/chromeos_laptop.c | 22
 drivers/platform/x86/Kconfig | 28
 drivers/platform/x86/Makefile | 5
 drivers/platform/x86/dell-smbios-base.c (renamed from drivers/platform/x86/dell-smbios.c) | 31
 drivers/platform/x86/dell-smbios-smm.c | 18
 drivers/platform/x86/dell-smbios-wmi.c | 14
 drivers/platform/x86/dell-smbios.h | 27
 drivers/platform/x86/dell-wmi.c | 2
 drivers/platform/x86/intel-hid.c | 1
 drivers/platform/x86/intel-vbtn.c | 47
 drivers/platform/x86/wmi.c | 6
 drivers/regulator/core.c | 2
 drivers/regulator/stm32-vrefbuf.c | 2
 drivers/s390/block/dasd.c | 21
 drivers/s390/cio/device_fsm.c | 7
 drivers/s390/cio/device_ops.c | 72
 drivers/s390/cio/io_sch.h | 1
 drivers/s390/net/qeth_core_main.c | 29
 drivers/s390/net/qeth_l3.h | 34
 drivers/s390/net/qeth_l3_main.c | 123
 drivers/scsi/hosts.c | 3
 drivers/scsi/megaraid/megaraid_sas_fusion.c | 42
 drivers/scsi/mpt3sas/mpt3sas_base.c | 8
 drivers/scsi/mpt3sas/mpt3sas_base.h | 3
 drivers/scsi/mpt3sas/mpt3sas_scsih.c | 23
 drivers/scsi/qedi/qedi_fw.c | 5
 drivers/scsi/qla2xxx/qla_def.h | 5
 drivers/scsi/qla2xxx/qla_gs.c | 5
 drivers/scsi/qla2xxx/qla_init.c | 51
 drivers/scsi/qla2xxx/qla_os.c | 73
 drivers/scsi/qla2xxx/qla_target.c | 17
 drivers/scsi/scsi_error.c | 5
 drivers/scsi/scsi_lib.c | 4
 drivers/scsi/sd.c | 3
 drivers/scsi/sd_zbc.c | 35
 drivers/scsi/storvsc_drv.c | 3
 drivers/soc/imx/gpc.c | 10
 drivers/staging/android/ashmem.c | 23
 drivers/staging/comedi/drivers.c | 3
 drivers/tty/n_tty.c | 6
 drivers/tty/serial/8250/8250_pci.c | 21
 drivers/tty/serial/atmel_serial.c | 1
 drivers/tty/serial/earlycon.c | 3
 drivers/tty/serial/imx.c | 2
 drivers/tty/serial/serial_core.c | 2
 drivers/tty/serial/sh-sci.c | 2
 drivers/tty/tty_io.c | 9
 drivers/usb/core/message.c | 4
 drivers/usb/core/quirks.c | 3
 drivers/usb/dwc2/params.c | 6
 drivers/usb/dwc3/core.c | 2
 drivers/usb/gadget/function/f_fs.c | 1
 drivers/usb/host/ohci-hcd.c | 3
 drivers/usb/host/xhci-dbgcap.c | 20
 drivers/usb/host/xhci-dbgtty.c | 20
 drivers/usb/host/xhci-pci.c | 3
 drivers/usb/host/xhci-plat.c | 11
 drivers/usb/host/xhci-rcar.c | 4
 drivers/usb/host/xhci.c | 3
 drivers/usb/host/xhci.h | 23
 drivers/usb/mon/mon_text.c | 126
 drivers/usb/musb/musb_core.c | 4
 drivers/usb/storage/uas.c | 2
 drivers/usb/storage/unusual_devs.h | 7
 drivers/usb/typec/fusb302/fusb302.c | 3
 drivers/usb/typec/tcpm.c | 163
 drivers/usb/usbip/vudc_sysfs.c | 8
 drivers/vfio/vfio_iommu_type1.c | 18
 drivers/video/fbdev/sbuslib.c | 4
 drivers/virtio/virtio_ring.c | 2
 drivers/watchdog/Kconfig | 4
 drivers/watchdog/f71808e_wdt.c | 3
 drivers/watchdog/hpwdt.c | 501
 drivers/watchdog/sbsa_gwdt.c | 3
 drivers/xen/events/events_base.c | 4
 drivers/xen/pvcalls-back.c | 2
 drivers/xen/pvcalls-front.c | 11
 drivers/xen/xenbus/xenbus_probe.c | 5
 307 files changed, 2842 insertions(+), 2421 deletions(-)
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index 9180b9bd5821..834509506ef6 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -97,7 +97,7 @@ static struct img_ascii_lcd_config boston_config = {
 static void malta_update(struct img_ascii_lcd_ctx *ctx)
 {
         unsigned int i;
-        int err;
+        int err = 0;
 
         for (i = 0; i < ctx->cfg->num_chars; i++) {
                 err = regmap_write(ctx->regmap,
@@ -180,7 +180,7 @@ static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
 static void sead3_update(struct img_ascii_lcd_ctx *ctx)
 {
         unsigned int i;
-        int err;
+        int err = 0;
 
         for (i = 0; i < ctx->cfg->num_chars; i++) {
                 err = sead3_wait_lcd_idle(ctx);
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches);
 
 /**
  * img_ascii_lcd_scroll() - scroll the display by a character
- * @arg: really a pointer to the private data structure
+ * @t: really a pointer to the private data structure
  *
  * Scroll the current message along the LCD by one character, rearming the
  * timer if required.
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index ea7869c0d7f9..ec5e8800f8ad 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1372,7 +1372,7 @@ static void panel_process_inputs(void)
                         break;
                 input->rise_timer = 0;
                 input->state = INPUT_ST_RISING;
-                /* no break here, fall through */
+                /* fall through */
         case INPUT_ST_RISING:
                 if ((phys_curr & input->mask) != input->value) {
                         input->state = INPUT_ST_LOW;
@@ -1385,11 +1385,11 @@ static void panel_process_inputs(void)
                 }
                 input->high_timer = 0;
                 input->state = INPUT_ST_HIGH;
-                /* no break here, fall through */
+                /* fall through */
         case INPUT_ST_HIGH:
                 if (input_state_high(input))
                         break;
-                /* no break here, fall through */
+                /* fall through */
         case INPUT_ST_FALLING:
                 input_state_falling(input);
         }
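The panel.c change above is cosmetic but deliberate: GCC's -Wimplicit-fallthrough only suppresses its warning when the comment before an unterminated case matches a known pattern such as "fall through". A standalone sketch of the annotation (illustrative cases, not driver code):

#include <stdio.h>

static void classify(int state)
{
        switch (state) {
        case 0:
                printf("low\n");
                /* fall through */
        case 1:
                printf("rising\n");
                break;
        default:
                printf("other\n");
        }
}

int main(void)
{
        classify(0);            /* prints "low" then "rising" */
        return 0;
}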
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index e5aa62fcf5a8..3aaf6af3ec23 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1758,7 +1758,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
         if (unit[drive].type->code == FD_NODRIVE)
                 return NULL;
         *part = 0;
-        return get_disk(unit[drive].gendisk);
+        return get_disk_and_module(unit[drive].gendisk);
 }
 
 static int __init amiga_floppy_probe(struct platform_device *pdev)
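This get_disk() -> get_disk_and_module() conversion (repeated in ataflop, brd, floppy, loop, swim and z2ram below) makes explicit that the helper pins two things at once: the gendisk object and the module that owns it. A userspace analogue of that paired-refcount idea; the types and helper here are invented for the sketch, not kernel API:

#include <stdio.h>

struct owner {
        int refs;               /* stands in for the module refcount */
};

struct disk {
        int refs;               /* stands in for the gendisk refcount */
        struct owner *owner;
};

static struct disk *get_disk_and_owner(struct disk *d)
{
        d->owner->refs++;       /* keep the owning code loaded */
        d->refs++;              /* keep the object alive */
        return d;
}

int main(void)
{
        struct owner module = { 0 };
        struct disk floppy = { 0, &module };

        get_disk_and_owner(&floppy);
        printf("disk refs=%d, owner refs=%d\n", floppy.refs, module.refs);
        return 0;
}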
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 8bc3b9fd8dd2..dfb2c2622e5a 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1917,7 +1917,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
         if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
                 return NULL;
         *part = 0;
-        return get_disk(unit[drive].disk);
+        return get_disk_and_module(unit[drive].disk);
 }
 
 static int __init atari_floppy_init (void)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 8028a3a7e7fd..deea78e485da 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -456,7 +456,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
456 456
457 mutex_lock(&brd_devices_mutex); 457 mutex_lock(&brd_devices_mutex);
458 brd = brd_init_one(MINOR(dev) / max_part, &new); 458 brd = brd_init_one(MINOR(dev) / max_part, &new);
459 kobj = brd ? get_disk(brd->brd_disk) : NULL; 459 kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
460 mutex_unlock(&brd_devices_mutex); 460 mutex_unlock(&brd_devices_mutex);
461 461
462 if (new) 462 if (new)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index eae484acfbbc..8ec7235fc93b 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4505,7 +4505,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
         if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
                 return NULL;
         *part = 0;
-        return get_disk(disks[drive]);
+        return get_disk_and_module(disks[drive]);
 }
 
 static int __init do_floppy_init(void)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d5fe720cf149..ee62d2d517bf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
         struct iov_iter i;
         ssize_t bw;
 
-        iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
+        iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
 
         file_start_write(file);
         bw = vfs_iter_write(file, &i, ppos, 0);
@@ -1922,7 +1922,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
         if (err < 0)
                 kobj = NULL;
         else
-                kobj = get_disk(lo->lo_disk);
+                kobj = get_disk_and_module(lo->lo_disk);
         mutex_unlock(&loop_index_mutex);
 
         *part = 0;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 5f2a4240a204..86258b00a1d4 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1591,7 +1591,7 @@ again:
                 if (new_index < 0) {
                         mutex_unlock(&nbd_index_mutex);
                         printk(KERN_ERR "nbd: failed to add new device\n");
-                        return ret;
+                        return new_index;
                 }
                 nbd = idr_find(&nbd_index_idr, new_index);
         }
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 531a0915066b..c61d20c9f3f8 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
         pkt->sector = new_sector;
 
         bio_reset(pkt->bio);
-        bio_set_set(pkt->bio, pd->bdev);
+        bio_set_dev(pkt->bio, pd->bdev);
         bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
         pkt->bio->bi_iter.bi_sector = new_sector;
         pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 84434d3ea19b..64e066eba72e 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -799,7 +799,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
                 return NULL;
 
         *part = 0;
-        return get_disk(swd->unit[drive].disk);
+        return get_disk_and_module(swd->unit[drive].disk);
 }
 
 static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e126e4cac2ca..92ec1bbece51 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);
 
 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
 static void blkfront_gather_backend_features(struct blkfront_info *info);
+static int negotiate_mq(struct blkfront_info *info);
 
 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,
         unsigned int i, max_page_order;
         unsigned int ring_page_order;
 
+        if (!info)
+                return -ENODEV;
+
         max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
                                               "max-ring-page-order", 0);
         ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
         info->nr_ring_pages = 1 << ring_page_order;
 
+        err = negotiate_mq(info);
+        if (err)
+                goto destroy_blkring;
+
         for (i = 0; i < info->nr_rings; i++) {
                 struct blkfront_ring_info *rinfo = &info->rinfo[i];
 
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,
         }
 
         info->xbdev = dev;
-        err = negotiate_mq(info);
-        if (err) {
-                kfree(info);
-                return err;
-        }
 
         mutex_init(&info->mutex);
         info->vdevice = vdevice;
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)
 
         blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
-        err = negotiate_mq(info);
-        if (err)
-                return err;
-
         err = talk_to_blkback(dev, info);
         if (!err)
                 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 41c95c9b2ab4..8f9130ab5887 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -332,7 +332,7 @@ static const struct block_device_operations z2_fops =
 static struct kobject *z2_find(dev_t dev, int *part, void *data)
 {
         *part = 0;
-        return get_disk(z2ram_gendisk);
+        return get_disk_and_module(z2ram_gendisk);
 }
 
 static struct request_queue *z2_queue;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2a55380ad730..60bf04b8f103 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,6 +21,7 @@
  *
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
@@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = {
         { }     /* Terminating entry */
 };
 
+/* The Bluetooth USB module build into some devices needs to be reset on resume,
+ * this is a problem with the platform (likely shutting off all power) not with
+ * the module itself. So we use a DMI list to match known broken platforms.
+ */
+static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+        {
+                /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+                },
+        },
+        {}
+};
+
 #define BTUSB_MAX_ISOC_FRAMES   10
 
 #define BTUSB_INTR_RUNNING      0
@@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf,
         hdev->send = btusb_send_frame;
         hdev->notify = btusb_notify;
 
+        if (dmi_check_system(btusb_needs_reset_resume_table))
+                interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+
 #ifdef CONFIG_PM
         err = btusb_config_oob_wake(hdev);
         if (err)
@@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf,
         if (id->driver_info & BTUSB_QCA_ROME) {
                 data->setup_on_usb = btusb_setup_qca;
                 hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
-
-                /* QCA Rome devices lose their updated firmware over suspend,
-                 * but the USB hub doesn't notice any status change.
-                 * explicitly request a device reset on resume.
-                 */
-                interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
         }
 
 #ifdef CONFIG_BT_HCIBTUSB_RTL
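The btusb change narrows a reset-on-resume quirk from every QCA Rome device to a DMI-matched list of platforms known to cut power in suspend. A minimal userspace sketch of that table-driven matching; the helper and struct are invented, only the shape mirrors the driver:

#include <stdio.h>
#include <string.h>

struct dmi_entry {
        const char *vendor;
        const char *product;
};

static const struct dmi_entry needs_reset_resume[] = {
        { "LENOVO", "Lenovo YOGA 920" },
        { NULL, NULL }          /* terminating entry */
};

static int platform_matches(const char *vendor, const char *product)
{
        const struct dmi_entry *e;

        for (e = needs_reset_resume; e->vendor; e++)
                if (!strcmp(e->vendor, vendor) && !strcmp(e->product, product))
                        return 1;
        return 0;
}

int main(void)
{
        printf("%d\n", platform_matches("LENOVO", "Lenovo YOGA 920")); /* 1 */
        return 0;
}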
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 0438a64b8185..6314dfb02969 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -922,12 +922,13 @@ static int bcm_get_resources(struct bcm_device *dev)
 
         dev->clk = devm_clk_get(dev->dev, NULL);
 
-        dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
-                                            GPIOD_OUT_LOW);
+        dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
+                                                     GPIOD_OUT_LOW);
         if (IS_ERR(dev->device_wakeup))
                 return PTR_ERR(dev->device_wakeup);
 
-        dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
+        dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
+                                                GPIOD_OUT_LOW);
         if (IS_ERR(dev->shutdown))
                 return PTR_ERR(dev->shutdown);
 
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 4d46003c46cf..cdaeeea7999c 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -630,7 +630,7 @@ static int sysc_init_dts_quirks(struct sysc *ddata)
630 for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { 630 for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
631 prop = of_get_property(np, sysc_dts_quirks[i].name, &len); 631 prop = of_get_property(np, sysc_dts_quirks[i].name, &len);
632 if (!prop) 632 if (!prop)
633 break; 633 continue;
634 634
635 ddata->cfg.quirks |= sysc_dts_quirks[i].mask; 635 ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
636 } 636 }
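The one-word ti-sysc fix matters because break abandoned the whole quirk scan at the first device-tree property that was absent, so quirks listed later in the table were silently lost; continue skips just that entry. A sketch of the difference (table contents invented):

#include <stdio.h>

int main(void)
{
        int present[] = { 0, 1, 1 };    /* first quirk property missing */
        unsigned int i, quirks = 0;

        for (i = 0; i < 3; i++) {
                if (!present[i])
                        continue;       /* with 'break', quirks 1 and 2 were lost */
                quirks |= 1u << i;
        }
        printf("quirks=%#x\n", quirks); /* 0x6 here; 0x0 with 'break' */
        return 0;
}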
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 4d1dc8b46877..f95b9c75175b 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
457 size_t count) 457 size_t count)
458{ 458{
459 int size = 0; 459 int size = 0;
460 int expected; 460 u32 expected;
461 461
462 if (!chip) 462 if (!chip)
463 return -EBUSY; 463 return -EBUSY;
@@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
474 } 474 }
475 475
476 expected = be32_to_cpu(*(__be32 *)(buf + 2)); 476 expected = be32_to_cpu(*(__be32 *)(buf + 2));
477 if (expected > count) { 477 if (expected > count || expected < TPM_HEADER_SIZE) {
478 size = -EIO; 478 size = -EIO;
479 goto out; 479 goto out;
480 } 480 }
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 76df4fbcf089..9e80a953d693 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -1190,6 +1190,10 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
1190 break; 1190 break;
1191 1191
1192 recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); 1192 recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
1193 if (recd > num_bytes) {
1194 total = -EFAULT;
1195 break;
1196 }
1193 1197
1194 rlength = be32_to_cpu(tpm_cmd.header.out.length); 1198 rlength = be32_to_cpu(tpm_cmd.header.out.length);
1195 if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + 1199 if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c17e75348a99..a700f8f9ead7 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
683 if (!rc) { 683 if (!rc) {
684 data_len = be16_to_cpup( 684 data_len = be16_to_cpup(
685 (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); 685 (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
686 if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
687 rc = -EFAULT;
688 goto out;
689 }
686 690
687 rlength = be32_to_cpu(((struct tpm2_cmd *)&buf) 691 rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
688 ->header.out.length); 692 ->header.out.length);
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index c1dd39eaaeeb..6116cd05e228 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
473static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) 473static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
474{ 474{
475 int size = 0; 475 int size = 0;
476 int expected, status; 476 int status;
477 u32 expected;
477 478
478 if (count < TPM_HEADER_SIZE) { 479 if (count < TPM_HEADER_SIZE) {
479 size = -EIO; 480 size = -EIO;
@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
488 } 489 }
489 490
490 expected = be32_to_cpu(*(__be32 *)(buf + 2)); 491 expected = be32_to_cpu(*(__be32 *)(buf + 2));
491 if ((size_t) expected > count) { 492 if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
492 size = -EIO; 493 size = -EIO;
493 goto out; 494 goto out;
494 } 495 }
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index c6428771841f..caa86b19c76d 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
281 struct device *dev = chip->dev.parent; 281 struct device *dev = chip->dev.parent;
282 struct i2c_client *client = to_i2c_client(dev); 282 struct i2c_client *client = to_i2c_client(dev);
283 s32 rc; 283 s32 rc;
284 int expected, status, burst_count, retries, size = 0; 284 int status;
285 int burst_count;
286 int retries;
287 int size = 0;
288 u32 expected;
285 289
286 if (count < TPM_HEADER_SIZE) { 290 if (count < TPM_HEADER_SIZE) {
287 i2c_nuvoton_ready(chip); /* return to idle */ 291 i2c_nuvoton_ready(chip); /* return to idle */
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
323 * to machine native 327 * to machine native
324 */ 328 */
325 expected = be32_to_cpu(*(__be32 *) (buf + 2)); 329 expected = be32_to_cpu(*(__be32 *) (buf + 2));
326 if (expected > count) { 330 if (expected > count || expected < size) {
327 dev_err(dev, "%s() expected > count\n", __func__); 331 dev_err(dev, "%s() expected > count\n", __func__);
328 size = -EIO; 332 size = -EIO;
329 continue; 333 continue;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 183a5f54d875..da074e3db19b 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -270,7 +270,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
         struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
         int size = 0;
-        int expected, status;
+        int status;
+        u32 expected;
 
         if (count < TPM_HEADER_SIZE) {
                 size = -EIO;
@@ -285,7 +286,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
         }
 
         expected = be32_to_cpu(*(__be32 *) (buf + 2));
-        if (expected > count) {
+        if (expected > count || expected < TPM_HEADER_SIZE) {
                 size = -EIO;
                 goto out;
         }
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index b3b4ed9b6874..d2e5382821a4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -386,6 +386,7 @@ config ATMEL_PIT
386 386
387config ATMEL_ST 387config ATMEL_ST
388 bool "Atmel ST timer support" if COMPILE_TEST 388 bool "Atmel ST timer support" if COMPILE_TEST
389 depends on HAS_IOMEM
389 select TIMER_OF 390 select TIMER_OF
390 select MFD_SYSCON 391 select MFD_SYSCON
391 help 392 help
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index 4927355f9cbe..471b428d8034 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -251,9 +251,14 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
251 int irq_reenable = clockevent_state_periodic(evt); 251 int irq_reenable = clockevent_state_periodic(evt);
252 252
253 /* 253 /*
254 * Any write to CTRL reg ACks the interrupt, we rewrite the 254 * 1. ACK the interrupt
255 * Count when [N]ot [H]alted bit. 255 * - For ARC700, any write to CTRL reg ACKs it, so just rewrite
256 * And re-arm it if perioid by [I]nterrupt [E]nable bit 256 * Count when [N]ot [H]alted bit.
257 * - For HS3x, it is a bit subtle. On taken count-down interrupt,
258 * IP bit [3] is set, which needs to be cleared for ACK'ing.
259 * The write below can only update the other two bits, hence
260 * explicitly clears IP bit
261 * 2. Re-arm interrupt if periodic by writing to IE bit [0]
257 */ 262 */
258 write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); 263 write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
259 264
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 3ee7e6fea621..846d18daf893 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name,
281 281
282static unsigned long __init ftm_clk_init(struct device_node *np) 282static unsigned long __init ftm_clk_init(struct device_node *np)
283{ 283{
284 unsigned long freq; 284 long freq;
285 285
286 freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); 286 freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt");
287 if (freq <= 0) 287 if (freq <= 0)
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 65e18c86d9b9..986b6796b631 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -166,7 +166,7 @@ static int __init __gic_clocksource_init(void)
166 166
167 /* Set clocksource mask. */ 167 /* Set clocksource mask. */
168 count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; 168 count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
169 count_width >>= __fls(GIC_CONFIG_COUNTBITS); 169 count_width >>= __ffs(GIC_CONFIG_COUNTBITS);
170 count_width *= 4; 170 count_width *= 4;
171 count_width += 32; 171 count_width += 32;
172 gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); 172 gic_clocksource.mask = CLOCKSOURCE_MASK(count_width);
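The mips-gic-timer fix is about extracting a multi-bit register field: you shift right by the position of the mask's lowest set bit (__ffs), not its highest (__fls); the two only coincide for single-bit masks. A standalone sketch using the GCC builtin that corresponds to __ffs (mask and register value invented):

#include <stdio.h>

#define FIELD_MASK 0xf0000000u

int main(void)
{
        unsigned int reg = 0x60000000u;
        unsigned int shift = __builtin_ctz(FIELD_MASK); /* 28, like __ffs */

        printf("field=%u\n", (reg & FIELD_MASK) >> shift);      /* 6 */
        return 0;
}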
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 3a88e33b0cfe..fb586e09682d 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -44,10 +44,10 @@ config ARM_DT_BL_CPUFREQ
44 44
45config ARM_SCPI_CPUFREQ 45config ARM_SCPI_CPUFREQ
46 tristate "SCPI based CPUfreq driver" 46 tristate "SCPI based CPUfreq driver"
47 depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI 47 depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
48 help 48 help
49 This adds the CPUfreq driver support for ARM big.LITTLE platforms 49 This adds the CPUfreq driver support for ARM platforms using SCPI
50 using SCPI protocol for CPU power management. 50 protocol for CPU power management.
51 51
52 This driver uses SCPI Message Protocol driver to interact with the 52 This driver uses SCPI Message Protocol driver to interact with the
53 firmware providing the CPU DVFS functionality. 53 firmware providing the CPU DVFS functionality.
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 7b596fa38ad2..6bebc1f9f55a 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
351static int s3c_cpufreq_init(struct cpufreq_policy *policy) 351static int s3c_cpufreq_init(struct cpufreq_policy *policy)
352{ 352{
353 policy->clk = clk_arm; 353 policy->clk = clk_arm;
354 return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); 354
355 policy->cpuinfo.transition_latency = cpu_cur.info->latency;
356
357 if (ftab)
358 return cpufreq_table_validate_and_show(policy, ftab);
359
360 return 0;
355} 361}
356 362
357static int __init s3c_cpufreq_initclks(void) 363static int __init s3c_cpufreq_initclks(void)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index c32a833e1b00..d300a163945f 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -51,15 +51,23 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
51static int 51static int
52scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) 52scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
53{ 53{
54 unsigned long freq = policy->freq_table[index].frequency;
54 struct scpi_data *priv = policy->driver_data; 55 struct scpi_data *priv = policy->driver_data;
55 u64 rate = policy->freq_table[index].frequency * 1000; 56 u64 rate = freq * 1000;
56 int ret; 57 int ret;
57 58
58 ret = clk_set_rate(priv->clk, rate); 59 ret = clk_set_rate(priv->clk, rate);
59 if (!ret && (clk_get_rate(priv->clk) != rate))
60 ret = -EIO;
61 60
62 return ret; 61 if (ret)
62 return ret;
63
64 if (clk_get_rate(priv->clk) != rate)
65 return -EIO;
66
67 arch_set_freq_scale(policy->related_cpus, freq,
68 policy->cpuinfo.max_freq);
69
70 return 0;
63} 71}
64 72
65static int 73static int
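The scpi-cpufreq rework separates three outcomes the old code folded together: the request failed, the request was accepted but the hardware did not land on the asked-for rate, and success (which now also reports the new frequency scale). A sketch of that request-then-verify flow; the rate functions are stand-ins, not kernel API:

#include <errno.h>
#include <stdio.h>

static unsigned long hw_rate = 1000000;

static int set_rate(unsigned long rate) { hw_rate = rate; return 0; }
static unsigned long get_rate(void) { return hw_rate; }

static int set_target(unsigned long rate)
{
        int ret = set_rate(rate);

        if (ret)
                return ret;             /* the request itself failed */
        if (get_rate() != rate)
                return -EIO;            /* accepted, but not applied */
        /* ...on success, report the new rate to interested code... */
        return 0;
}

int main(void)
{
        printf("%d\n", set_target(2000000));    /* 0 */
        return 0;
}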
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index fcfa5b1eae61..b3afb6cc9d72 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -211,7 +211,7 @@ static int __sev_platform_shutdown_locked(int *error)
211{ 211{
212 int ret; 212 int ret;
213 213
214 ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error); 214 ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
215 if (ret) 215 if (ret)
216 return ret; 216 return ret;
217 217
@@ -271,7 +271,7 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
271 return rc; 271 return rc;
272 } 272 }
273 273
274 return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error); 274 return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
275} 275}
276 276
277static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) 277static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
@@ -299,7 +299,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
299 return rc; 299 return rc;
300 } 300 }
301 301
302 return __sev_do_cmd_locked(cmd, 0, &argp->error); 302 return __sev_do_cmd_locked(cmd, NULL, &argp->error);
303} 303}
304 304
305static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) 305static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(sev_guest_decommission);
624 624
625int sev_guest_df_flush(int *error) 625int sev_guest_df_flush(int *error)
626{ 626{
627 return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error); 627 return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
628} 628}
629EXPORT_SYMBOL_GPL(sev_guest_df_flush); 629EXPORT_SYMBOL_GPL(sev_guest_df_flush);
630 630
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 473af694ad1c..ecdc292aa4e4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -246,12 +246,6 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
246{ 246{
247 long avail; 247 long avail;
248 248
249 /*
250 * The device driver is allowed to sleep, in order to make the
251 * memory directly accessible.
252 */
253 might_sleep();
254
255 if (!dax_dev) 249 if (!dax_dev)
256 return -EOPNOTSUPP; 250 return -EOPNOTSUPP;
257 251
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f652a0e0f5a2..3548caa9e933 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -163,6 +163,7 @@ struct mv_xor_v2_device {
         void __iomem *dma_base;
         void __iomem *glob_base;
         struct clk *clk;
+        struct clk *reg_clk;
         struct tasklet_struct irq_tasklet;
         struct list_head free_sw_desc;
         struct dma_device dmadev;
@@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
         if (ret)
                 return ret;
 
+        xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
+        if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
+                if (!IS_ERR(xor_dev->reg_clk)) {
+                        ret = clk_prepare_enable(xor_dev->reg_clk);
+                        if (ret)
+                                return ret;
+                } else {
+                        return PTR_ERR(xor_dev->reg_clk);
+                }
+        }
+
         xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
-        if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
-                return -EPROBE_DEFER;
+        if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+                ret = EPROBE_DEFER;
+                goto disable_reg_clk;
+        }
         if (!IS_ERR(xor_dev->clk)) {
                 ret = clk_prepare_enable(xor_dev->clk);
                 if (ret)
-                        return ret;
+                        goto disable_reg_clk;
         }
 
         ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
@@ -866,8 +880,9 @@ free_hw_desq:
 free_msi_irqs:
         platform_msi_domain_free_irqs(&pdev->dev);
 disable_clk:
-        if (!IS_ERR(xor_dev->clk))
-                clk_disable_unprepare(xor_dev->clk);
+        clk_disable_unprepare(xor_dev->clk);
+disable_reg_clk:
+        clk_disable_unprepare(xor_dev->reg_clk);
         return ret;
 }
 
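The mv_xor_v2 probe treats the new "reg" clock as optional: -ENOENT from the lookup means the clock simply is not described and probing continues, while any other error (a deferral included) aborts and unwinds. A sketch of that optional-resource pattern; lookup_clock is a stand-in, not a kernel API:

#include <errno.h>
#include <stdio.h>

static int lookup_clock(const char *name)
{
        (void)name;
        return -ENOENT;         /* pretend no "reg" clock is described */
}

static int probe(void)
{
        int ret = lookup_clock("reg");

        if (ret == -ENOENT)
                ret = 0;        /* optional resource: absence is fine */
        else if (ret < 0)
                return ret;     /* real failure: propagate */

        /* ...continue with the mandatory resources... */
        return ret;
}

int main(void)
{
        printf("probe=%d\n", probe());  /* 0 */
        return 0;
}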
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index e3ff162c03fc..d0cacdb0713e 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -917,7 +917,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
917 917
918 rcar_dmac_chan_configure_desc(chan, desc); 918 rcar_dmac_chan_configure_desc(chan, desc);
919 919
920 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; 920 max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
921 921
922 /* 922 /*
923 * Allocate and fill the transfer chunk descriptors. We own the only 923 * Allocate and fill the transfer chunk descriptors. We own the only
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f34430f99fd8..872100215ca0 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
  * sbridge structs
  */
 
-#define NUM_CHANNELS    4       /* Max channels per MC */
+#define NUM_CHANNELS    6       /* Max channels per MC */
 #define MAX_DIMMS       3       /* Max DIMMS per channel */
 #define KNL_MAX_CHAS    38      /* KNL max num. of Cache Home Agents */
 #define KNL_MAX_CHANNELS 6      /* KNL max num. of PCI channels */
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index c16600f30611..0bdea60c65dd 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -639,7 +639,7 @@ static void __exit dcdbas_exit(void)
         platform_driver_unregister(&dcdbas_driver);
 }
 
-module_init(dcdbas_init);
+subsys_initcall_sync(dcdbas_init);
 module_exit(dcdbas_exit);
 
 MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index da661bf8cb96..13c1edd37e96 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -68,11 +68,11 @@ void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
         efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
         efi_status_t status;
         efi_physical_addr_t log_location, log_last_entry;
-        struct linux_efi_tpm_eventlog *log_tbl;
+        struct linux_efi_tpm_eventlog *log_tbl = NULL;
         unsigned long first_entry_addr, last_entry_addr;
         size_t log_size, last_entry_size;
         efi_bool_t truncated;
-        void *tcg2_protocol;
+        void *tcg2_protocol = NULL;
 
         status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
                                 &tcg2_protocol);
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index e76de57dd617..ebaea8b1594b 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -14,7 +14,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/init.h>
@@ -37,10 +36,9 @@ struct gpio_rcar_priv {
         struct platform_device *pdev;
         struct gpio_chip gpio_chip;
         struct irq_chip irq_chip;
-        struct clk *clk;
         unsigned int irq_parent;
+        atomic_t wakeup_path;
         bool has_both_edge_trigger;
-        bool needs_clk;
 };
 
 #define IOINTSEL 0x00   /* General IO/Interrupt Switching Register */
@@ -186,13 +184,10 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
                 }
         }
 
-        if (!p->clk)
-                return 0;
-
         if (on)
-                clk_enable(p->clk);
+                atomic_inc(&p->wakeup_path);
         else
-                clk_disable(p->clk);
+                atomic_dec(&p->wakeup_path);
 
         return 0;
 }
@@ -330,17 +325,14 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
 
 struct gpio_rcar_info {
         bool has_both_edge_trigger;
-        bool needs_clk;
 };
 
 static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
         .has_both_edge_trigger = false,
-        .needs_clk = false,
 };
 
 static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
         .has_both_edge_trigger = true,
-        .needs_clk = true,
 };
 
 static const struct of_device_id gpio_rcar_of_table[] = {
@@ -403,7 +395,6 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
         ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
         *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
         p->has_both_edge_trigger = info->has_both_edge_trigger;
-        p->needs_clk = info->needs_clk;
 
         if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
                 dev_warn(&p->pdev->dev,
@@ -440,16 +431,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
 
         platform_set_drvdata(pdev, p);
 
-        p->clk = devm_clk_get(dev, NULL);
-        if (IS_ERR(p->clk)) {
-                if (p->needs_clk) {
-                        dev_err(dev, "unable to get clock\n");
-                        ret = PTR_ERR(p->clk);
-                        goto err0;
-                }
-                p->clk = NULL;
-        }
-
         pm_runtime_enable(dev);
 
         irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -531,11 +512,24 @@ static int gpio_rcar_remove(struct platform_device *pdev)
         return 0;
 }
 
+static int __maybe_unused gpio_rcar_suspend(struct device *dev)
+{
+        struct gpio_rcar_priv *p = dev_get_drvdata(dev);
+
+        if (atomic_read(&p->wakeup_path))
+                device_set_wakeup_path(dev);
+
+        return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, NULL);
+
 static struct platform_driver gpio_rcar_device_driver = {
         .probe = gpio_rcar_probe,
         .remove = gpio_rcar_remove,
         .driver = {
                 .name = "gpio_rcar",
+                .pm = &gpio_rcar_pm_ops,
                 .of_match_table = of_match_ptr(gpio_rcar_of_table),
         }
 };
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 564bb7a31da4..84e5a9df2344 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -241,6 +241,19 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 
                 desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
                                                 &of_flags);
+                /*
+                 * -EPROBE_DEFER in our case means that we found a
+                 * valid GPIO property, but no controller has been
+                 * registered so far.
+                 *
+                 * This means we don't need to look any further for
+                 * alternate name conventions, and we should really
+                 * preserve the return code for our user to be able to
+                 * retry probing later.
+                 */
+                if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER)
+                        return desc;
+
                 if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
                         break;
         }
@@ -250,7 +263,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
                 desc = of_find_spi_gpio(dev, con_id, &of_flags);
 
         /* Special handling for regulator GPIOs if used */
-        if (IS_ERR(desc))
+        if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
                 desc = of_find_regulator_gpio(dev, con_id, &of_flags);
 
         if (IS_ERR(desc))
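The gpiolib-of change is about error precedence in a multi-step lookup: -ENOENT ("nothing under this name") should send the search on to the next naming convention, but -EPROBE_DEFER ("found it, the provider just isn't registered yet") must be returned untouched so the caller retries later instead of falling through to an unrelated fallback. A sketch with stand-in lookups (EPROBE_DEFER is kernel-private, so it is defined locally here):

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517

static int lookup_primary(void) { return -EPROBE_DEFER; }
static int lookup_fallback(void) { return -ENOENT; }

static int find_gpio(void)
{
        int ret = lookup_primary();

        if (ret == -EPROBE_DEFER)
                return ret;                     /* preserve: caller should retry */
        if (ret == -ENOENT)
                ret = lookup_fallback();        /* keep searching */
        return ret;
}

int main(void)
{
        printf("%d\n", find_gpio());    /* -517 */
        return 0;
}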
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d5a2eefd6c3e..74edba18b159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1156,7 +1156,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
1156/* 1156/*
1157 * Writeback 1157 * Writeback
1158 */ 1158 */
1159#define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */ 1159#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */
1160 1160
1161struct amdgpu_wb { 1161struct amdgpu_wb {
1162 struct amdgpu_bo *wb_obj; 1162 struct amdgpu_bo *wb_obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 57afad79f55d..8fa850a070e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
540 size_t size; 540 size_t size;
541 u32 retry = 3; 541 u32 retry = 3;
542 542
543 if (amdgpu_acpi_pcie_notify_device_ready(adev))
544 return -EINVAL;
545
543 /* Get the device handle */ 546 /* Get the device handle */
544 handle = ACPI_HANDLE(&adev->pdev->dev); 547 handle = ACPI_HANDLE(&adev->pdev->dev);
545 if (!handle) 548 if (!handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 74d2efaec52f..7a073ac5f9c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 		/* don't do anything if sink is not display port, i.e.,
 		 * passive dp->(dvi|hdmi) adaptor
 		 */
-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			int saved_dpms = connector->dpms;
-			/* Only turn off the display if it's physically disconnected */
-			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
-				/* Don't try to start link training before we
-				 * have the dpcd */
-				if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
-					return;
-
-				/* set it to OFF so that drm_helper_connector_dpms()
-				 * won't return immediately since the current state
-				 * is ON at this point.
-				 */
-				connector->dpms = DRM_MODE_DPMS_OFF;
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-			}
-			connector->dpms = saved_dpms;
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+		    amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+				return;
+
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 00a50cc5ec9a..af1b879a9ee9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -492,7 +492,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 
 		/* clear wb memory */
-		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
+		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
 	}
 
 	return 0;
@@ -530,8 +530,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
  */
 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 {
+	wb >>= 3;
 	if (wb < adev->wb.num_wb)
-		__clear_bit(wb >> 3, adev->wb.used);
+		__clear_bit(wb, adev->wb.used);
 }
 
 /**
@@ -1455,11 +1456,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.hw)
 			continue;
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
-			amdgpu_free_static_csa(adev);
-			amdgpu_device_wb_fini(adev);
-			amdgpu_device_vram_scratch_fini(adev);
-		}
 
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
@@ -1486,6 +1482,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.sw)
 			continue;
+
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+			amdgpu_free_static_csa(adev);
+			amdgpu_device_wb_fini(adev);
+			amdgpu_device_vram_scratch_fini(adev);
+		}
+
 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
@@ -2284,14 +2287,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 			}
 			drm_modeset_unlock_all(dev);
-		} else {
-			/*
-			 * There is no equivalent atomic helper to turn on
-			 * display, so we defined our own function for this,
-			 * once suspend resume is supported by the atomic
-			 * framework this will be reworked
-			 */
-			amdgpu_dm_display_resume(adev);
 		}
 	}
 
@@ -2726,7 +2721,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		if (amdgpu_device_has_dc_support(adev)) {
 			if (drm_atomic_helper_resume(adev->ddev, state))
 				dev_info(adev->dev, "drm resume failed:%d\n", r);
-			amdgpu_dm_display_resume(adev);
 		} else {
 			drm_helper_resume_force_mode(adev->ddev);
 		}
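The three writeback hunks above are one consistent change of accounting granularity: the used bitmap has AMDGPU_MAX_WB bits, each bit now covers a block of eight 32-bit slots (hence the sizeof(uint32_t) * 8 in the memset), and the handles held by callers are slot indexes, so wb_free() shifts back down before clearing. A compressed sketch of the scheme (the get side is not in this hunk; it is assumed to hand out bit << 3 to match, and error handling is elided):

static DECLARE_BITMAP(wb_used, 128);		/* AMDGPU_MAX_WB bits */

static u32 wb_get_sketch(void)
{
	u32 bit = find_first_zero_bit(wb_used, 128);

	__set_bit(bit, wb_used);
	return bit << 3;	/* handle = first of 8 u32 slots */
}

static void wb_free_sketch(u32 wb)
{
	wb >>= 3;		/* handle back to a bitmap bit */
	if (wb < 128)
		__clear_bit(wb, wb_used);
}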
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e48b4ec88c8c..ca6c931dabfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		amdgpu_mn_unregister(robj);
 		amdgpu_bo_unref(&robj);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index e14ab34d8262..7c2be32c5aea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
 static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-
+	spin_lock(&mgr->lock);
 	drm_mm_takedown(&mgr->mm);
 	spin_unlock(&mgr->lock);
 	kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 56bcd59c3399..36483e0d3c97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -257,7 +257,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
 	if (r) {
 		adev->irq.installed = false;
-		flush_work(&adev->hotplug_work);
+		if (!amdgpu_device_has_dc_support(adev))
+			flush_work(&adev->hotplug_work);
 		cancel_work_sync(&adev->reset_work);
 		return r;
 	}
@@ -282,7 +283,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 	adev->irq.installed = false;
 	if (adev->irq.msi_enabled)
 		pci_disable_msi(adev->pdev);
-	flush_work(&adev->hotplug_work);
+	if (!amdgpu_device_has_dc_support(adev))
+		flush_work(&adev->hotplug_work);
 	cancel_work_sync(&adev->reset_work);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 54f06c959340..2264c5c97009 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -352,6 +352,7 @@ struct amdgpu_mode_info {
 	u16 firmware_flags;
 	/* pointer to backlight encoder */
 	struct amdgpu_encoder *bl_encoder;
+	u8 bl_level; /* saved backlight level */
 	struct amdgpu_audio	audio; /* audio stuff */
 	int			num_crtc; /* number of crtcs */
 	int			num_hpd; /* number of hpd pins */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5c4c3e0d527b..1220322c1680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -56,6 +56,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	amdgpu_bo_kunmap(bo);
 
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 13044e66dcaf..561d3312af32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -481,7 +481,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
 	result = 0;
 
 	if (*pos < 12) {
-		early[0] = amdgpu_ring_get_rptr(ring);
+		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
 		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
 		early[2] = ring->wptr & ring->buf_mask;
 		for (i = *pos / 4; i < 3 && size; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2eae86bf906..5c26a8e806b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 
 	cancel_delayed_work_sync(&adev->uvd.idle_work);
 
-	for (i = 0; i < adev->uvd.max_handles; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			break;
+	/* only valid for physical mode */
+	if (adev->asic_type < CHIP_POLARIS10) {
+		for (i = 0; i < adev->uvd.max_handles; ++i)
+			if (atomic_read(&adev->uvd.handles[i]))
+				break;
 
-	if (i == AMDGPU_MAX_UVD_HANDLES)
-		return 0;
+		if (i == adev->uvd.max_handles)
+			return 0;
+	}
 
 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
 	ptr = adev->uvd.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2da127..d702fb8e3427 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -34,7 +34,7 @@
 #include <linux/backlight.h>
 #include "bif/bif_4_1_d.h"
 
-static u8
+u8
 amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 {
 	u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 	return backlight_level;
 }
 
-static void
+void
 amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
 						   u8 backlight_level)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40515ce..f77cbdef679e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
 #define __ATOMBIOS_ENCODER_H__
 
 u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+						   u8 backlight_level);
+u8
 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
 void
 amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index f34bc68aadfb..022f303463fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2921,6 +2921,11 @@ static int dce_v10_0_hw_fini(void *handle)
 
 static int dce_v10_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v10_0_hw_fini(handle);
 }
 
@@ -2929,6 +2934,9 @@ static int dce_v10_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v10_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 26378bd6aba4..800a9f36ab4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3047,6 +3047,11 @@ static int dce_v11_0_hw_fini(void *handle)
 
 static int dce_v11_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v11_0_hw_fini(handle);
 }
 
@@ -3055,6 +3060,9 @@ static int dce_v11_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v11_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index bd2c4f727df6..b8368f69ce1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2787,6 +2787,11 @@ static int dce_v6_0_hw_fini(void *handle)
 
 static int dce_v6_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v6_0_hw_fini(handle);
 }
 
@@ -2795,6 +2800,9 @@ static int dce_v6_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v6_0_hw_init(handle);
 
 	/* turn on the BL */
@@ -3093,7 +3101,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
 			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 			schedule_work(&adev->hotplug_work);
-			DRM_INFO("IH: HPD%d\n", hpd + 1);
+			DRM_DEBUG("IH: HPD%d\n", hpd + 1);
 		}
 
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c008dc030687..012e0a9ae0ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2819,6 +2819,11 @@ static int dce_v8_0_hw_fini(void *handle)
 
 static int dce_v8_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v8_0_hw_fini(handle);
 }
 
@@ -2827,6 +2832,9 @@ static int dce_v8_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v8_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index a066c5eda135..a4309698e76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4384,34 +4384,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 	case CHIP_KAVERI:
 		adev->gfx.config.max_shader_engines = 1;
 		adev->gfx.config.max_tile_pipes = 4;
-		if ((adev->pdev->device == 0x1304) ||
-		    (adev->pdev->device == 0x1305) ||
-		    (adev->pdev->device == 0x130C) ||
-		    (adev->pdev->device == 0x130F) ||
-		    (adev->pdev->device == 0x1310) ||
-		    (adev->pdev->device == 0x1311) ||
-		    (adev->pdev->device == 0x131C)) {
-			adev->gfx.config.max_cu_per_sh = 8;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1309) ||
-			   (adev->pdev->device == 0x130A) ||
-			   (adev->pdev->device == 0x130D) ||
-			   (adev->pdev->device == 0x1313) ||
-			   (adev->pdev->device == 0x131D)) {
-			adev->gfx.config.max_cu_per_sh = 6;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1306) ||
-			   (adev->pdev->device == 0x1307) ||
-			   (adev->pdev->device == 0x130B) ||
-			   (adev->pdev->device == 0x130E) ||
-			   (adev->pdev->device == 0x1315) ||
-			   (adev->pdev->device == 0x131B)) {
-			adev->gfx.config.max_cu_per_sh = 4;
-			adev->gfx.config.max_backends_per_se = 1;
-		} else {
-			adev->gfx.config.max_cu_per_sh = 3;
-			adev->gfx.config.max_backends_per_se = 1;
-		}
+		adev->gfx.config.max_cu_per_sh = 8;
+		adev->gfx.config.max_backends_per_se = 2;
 		adev->gfx.config.max_sh_per_se = 1;
 		adev->gfx.config.max_texture_channel_caches = 4;
 		adev->gfx.config.max_gprs = 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2719937e09d6..3b7e7af09ead 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -634,7 +634,7 @@ static int gmc_v9_0_late_init(void *handle)
 	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
 		BUG_ON(vm_inv_eng[i] > 16);
 
-	if (adev->asic_type == CHIP_VEGA10) {
+	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
 		r = gmc_v9_0_ecc_available(adev);
 		if (r == 1) {
 			DRM_INFO("ECC is active.\n");
@@ -682,7 +682,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
 	if (!adev->mc.vram_width) {
 		/* hbm memory channel size */
-		chansize = 128;
+		if (adev->flags & AMD_IS_APU)
+			chansize = 64;
+		else
+			chansize = 128;
 
 		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
 		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
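The APU/dGPU split matters because this fallback path derives the total bus width as channel count times channel size: HBM on the discrete Vega parts uses 128-bit channels, while the APU memory integration reports 64-bit ones. In sketch form (numchan stands for the value decoded from the IntLvNumChan field read just above):

/* Sketch of the derivation the surrounding code performs. */
static u32 vram_width_sketch(bool is_apu, u32 numchan)
{
	u32 chansize = is_apu ? 64 : 128;	/* bits per memory channel */

	return numchan * chansize;		/* total bus width in bits */
}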
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e92fb372bc99..91cf95a8c39c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -238,31 +238,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u64 *wptr = NULL;
-	uint64_t local_wptr = 0;
+	u64 wptr;
 
 	if (ring->use_doorbell) {
 		/* XXX check if swapping is necessary on BE */
-		wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
-		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
-		*wptr = (*wptr) >> 2;
-		DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
+		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
 	} else {
 		u32 lowbit, highbit;
 		int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
-		wptr = &local_wptr;
 		lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
 		highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
 
 		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
 				me, highbit, lowbit);
-		*wptr = highbit;
-		*wptr = (*wptr) << 32;
-		*wptr |= lowbit;
+		wptr = highbit;
+		wptr = wptr << 32;
+		wptr |= lowbit;
 	}
 
-	return *wptr;
+	return wptr >> 2;
 }
 
 /**
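The get_wptr rewrite fixes two distinct problems: the doorbell path used to shift the value in place, writing back into the shared writeback slot that the doorbell mechanism itself updates, and going through a plain pointer left the compiler free to re-read the slot between uses. Taking one READ_ONCE() snapshot into a local and shifting only at the end addresses both. Reduced to the bare idiom (hypothetical names):

/* Sketch: snapshot a producer-updated 64-bit slot once, work on the
 * private copy, and never store back into the shared location. */
static u64 snapshot_wptr(u64 *wb_slot)
{
	u64 wptr = READ_ONCE(*wb_slot);	/* one load, no compiler re-read */

	return wptr >> 2;		/* scale the copy, not the slot */
}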
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 543101d5a5ed..2095173aaabf 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -31,6 +31,7 @@
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "atom.h"
+#include "amd_pcie.h"
 #include "amdgpu_powerplay.h"
 #include "sid.h"
 #include "si_ih.h"
@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 {
 	struct pci_dev *root = adev->pdev->bus->self;
 	int bridge_pos, gpu_pos;
-	u32 speed_cntl, mask, current_data_rate;
-	int ret, i;
+	u32 speed_cntl, current_data_rate;
+	int i;
 	u16 tmp16;
 
 	if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret != 0)
-		return;
-
-	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 		return;
 
 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
 		LC_CURRENT_DATA_RATE_SHIFT;
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate == 2) {
 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
 			return;
 		}
 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
-	} else if (mask & DRM_PCIE_SPEED_50) {
+	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
 		if (current_data_rate == 1) {
 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
 			return;
@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (!gpu_pos)
 		return;
 
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate != 2) {
 			u16 bridge_cfg, gpu_cfg;
 			u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 
 	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
 	tmp16 &= ~0xf;
-	if (mask & DRM_PCIE_SPEED_80)
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 		tmp16 |= 3;
-	else if (mask & DRM_PCIE_SPEED_50)
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
 		tmp16 |= 2;
 	else
 		tmp16 |= 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index ce675a7f179a..22f0b7ff3ac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -26,6 +26,7 @@
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
 #include "amdgpu_atombios.h"
+#include "amd_pcie.h"
 #include "sid.h"
 #include "r600_dpm.h"
 #include "si_dpm.h"
@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
 	}
 }
 
-static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
-						      u32 sys_mask,
-						      enum amdgpu_pcie_gen asic_gen,
-						      enum amdgpu_pcie_gen default_gen)
-{
-	switch (asic_gen) {
-	case AMDGPU_PCIE_GEN1:
-		return AMDGPU_PCIE_GEN1;
-	case AMDGPU_PCIE_GEN2:
-		return AMDGPU_PCIE_GEN2;
-	case AMDGPU_PCIE_GEN3:
-		return AMDGPU_PCIE_GEN3;
-	default:
-		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
-			return AMDGPU_PCIE_GEN3;
-		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
-			return AMDGPU_PCIE_GEN2;
-		else
-			return AMDGPU_PCIE_GEN1;
-	}
-	return AMDGPU_PCIE_GEN1;
-}
-
 static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
 				   u32 *p, u32 *u)
 {
@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
 						table->ACPIState.levels[0].vddc.index,
 						&table->ACPIState.levels[0].std_vddc);
 	}
-	table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
-								    si_pi->sys_pcie_mask,
-								    si_pi->boot_pcie_gen,
-								    AMDGPU_PCIE_GEN1);
+	table->ACPIState.levels[0].gen2PCIE =
+		(u8)amdgpu_get_pcie_gen_support(adev,
+						si_pi->sys_pcie_mask,
+						si_pi->boot_pcie_gen,
+						AMDGPU_PCIE_GEN1);
 
 	if (si_pi->vddc_phase_shed_control)
 		si_populate_phase_shedding_value(adev,
@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
 	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
 	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
 	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
-	pl->pcie_gen = r600_get_pcie_gen_support(adev,
-						 si_pi->sys_pcie_mask,
-						 si_pi->boot_pcie_gen,
-						 clock_info->si.ucPCIEGen);
+	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+						   si_pi->sys_pcie_mask,
+						   si_pi->boot_pcie_gen,
+						   clock_info->si.ucPCIEGen);
 
 	/* patch up vddc if necessary */
 	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
 	int ret;
-	u32 mask;
 
 	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
 	if (si_pi == NULL)
@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret)
-		si_pi->sys_pcie_mask = 0;
-	else
-		si_pi->sys_pcie_mask = mask;
+	si_pi->sys_pcie_mask =
+		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
 	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
 	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
 
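The thread through all of the si.c and si_dpm.c hunks is the same: stop calling drm_pcie_get_speed_cap_mask() at use time and instead consume adev->pm.pcie_gen_mask, which the core fills in once using CAIL_PCIE_LINK_SPEED_SUPPORT_* bits; the local r600_get_pcie_gen_support() copy deleted above then gives way to the shared amdgpu_get_pcie_gen_support() helper. That helper's body is not shown in this diff; it is assumed to mirror the removed logic with the CAIL bits standing in for the old DRM_PCIE_SPEED_* flags, roughly:

static enum amdgpu_pcie_gen pcie_gen_sketch(u32 sys_mask,
					    enum amdgpu_pcie_gen asic_gen,
					    enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
	case AMDGPU_PCIE_GEN2:
	case AMDGPU_PCIE_GEN3:
		return asic_gen;	/* explicit per-state setting wins */
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    default_gen == AMDGPU_PCIE_GEN3)
			return AMDGPU_PCIE_GEN3;
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
		    default_gen == AMDGPU_PCIE_GEN2)
			return AMDGPU_PCIE_GEN2;
		return AMDGPU_PCIE_GEN1;
	}
}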
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index b2bfedaf57f1..9bab4842cd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1618,7 +1618,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
 	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
 	.emit_frame_size =
 		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
-		6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
 		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
 		1, /* uvd_v6_0_enc_ring_insert_end */
 	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1ce4c98385e3..c345e645f1d7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -629,11 +629,13 @@ static int dm_resume(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct amdgpu_display_manager *dm = &adev->dm;
+	int ret = 0;
 
 	/* power on hardware */
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 
-	return 0;
+	ret = amdgpu_dm_display_resume(adev);
+	return ret;
 }
 
 int amdgpu_dm_display_resume(struct amdgpu_device *adev)
@@ -1035,6 +1037,10 @@ static void handle_hpd_rx_irq(void *param)
 	    !is_mst_root_connector) {
 		/* Downstream Port status changed. */
 		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+
+			if (aconnector->fake_enable)
+				aconnector->fake_enable = false;
+
 			amdgpu_dm_update_connector_after_detect(aconnector);
 
 
@@ -2010,30 +2016,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 	dst.width = stream->timing.h_addressable;
 	dst.height = stream->timing.v_addressable;
 
-	rmx_type = dm_state->scaling;
-	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
-		if (src.width * dst.height <
-				src.height * dst.width) {
-			/* height needs less upscaling/more downscaling */
-			dst.width = src.width *
-					dst.height / src.height;
-		} else {
-			/* width needs less upscaling/more downscaling */
-			dst.height = src.height *
-					dst.width / src.width;
-		}
-	} else if (rmx_type == RMX_CENTER) {
-		dst = src;
-	}
+	if (dm_state) {
+		rmx_type = dm_state->scaling;
+		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+			if (src.width * dst.height <
+					src.height * dst.width) {
+				/* height needs less upscaling/more downscaling */
+				dst.width = src.width *
+						dst.height / src.height;
+			} else {
+				/* width needs less upscaling/more downscaling */
+				dst.height = src.height *
+						dst.width / src.width;
+			}
+		} else if (rmx_type == RMX_CENTER) {
+			dst = src;
+		}
 
-	dst.x = (stream->timing.h_addressable - dst.width) / 2;
-	dst.y = (stream->timing.v_addressable - dst.height) / 2;
+		dst.x = (stream->timing.h_addressable - dst.width) / 2;
+		dst.y = (stream->timing.v_addressable - dst.height) / 2;
 
-	if (dm_state->underscan_enable) {
-		dst.x += dm_state->underscan_hborder / 2;
-		dst.y += dm_state->underscan_vborder / 2;
-		dst.width -= dm_state->underscan_hborder;
-		dst.height -= dm_state->underscan_vborder;
-	}
+		if (dm_state->underscan_enable) {
+			dst.x += dm_state->underscan_hborder / 2;
+			dst.y += dm_state->underscan_vborder / 2;
+			dst.width -= dm_state->underscan_hborder;
+			dst.height -= dm_state->underscan_vborder;
+		}
+	}
 
 	stream->src = src;
@@ -2358,12 +2366,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 	if (aconnector == NULL) {
 		DRM_ERROR("aconnector is NULL!\n");
-		goto drm_connector_null;
-	}
-
-	if (dm_state == NULL) {
-		DRM_ERROR("dm_state is NULL!\n");
-		goto dm_state_null;
+		return stream;
 	}
 
 	drm_connector = &aconnector->base;
@@ -2375,18 +2378,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	 */
 	if (aconnector->mst_port) {
 		dm_dp_mst_dc_sink_create(drm_connector);
-		goto mst_dc_sink_create_done;
+		return stream;
 	}
 
 	if (create_fake_sink(aconnector))
-		goto stream_create_fail;
+		return stream;
 	}
 
 	stream = dc_create_stream_for_sink(aconnector->dc_sink);
 
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
-		goto stream_create_fail;
+		return stream;
 	}
 
 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
@@ -2412,9 +2415,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	} else {
 		decide_crtc_timing_for_drm_display_mode(
 				&mode, preferred_mode,
-				dm_state->scaling != RMX_OFF);
+				dm_state ? (dm_state->scaling != RMX_OFF) : false);
 	}
 
+	if (!dm_state)
+		drm_mode_set_crtcinfo(&mode, 0);
+
 	fill_stream_properties_from_drm_display_mode(stream,
 			&mode, &aconnector->base);
 	update_stream_scaling_settings(&mode, dm_state, stream);
@@ -2424,10 +2430,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 			drm_connector,
 			aconnector->dc_sink);
 
-stream_create_fail:
-dm_state_null:
-drm_connector_null:
-mst_dc_sink_create_done:
+	update_stream_signal(stream);
+
 	return stream;
 }
 
@@ -2495,6 +2499,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 	return &state->base;
 }
 
+
+static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+{
+	enum dc_irq_source irq_source;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+
+	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+}
+
+static int dm_enable_vblank(struct drm_crtc *crtc)
+{
+	return dm_set_vblank(crtc, true);
+}
+
+static void dm_disable_vblank(struct drm_crtc *crtc)
+{
+	dm_set_vblank(crtc, false);
+}
+
 /* Implemented only the options currently availible for the driver */
 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.reset = dm_crtc_reset_state,
@@ -2504,6 +2529,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.page_flip = drm_atomic_helper_page_flip,
 	.atomic_duplicate_state = dm_crtc_duplicate_state,
 	.atomic_destroy_state = dm_crtc_destroy_state,
+	.enable_vblank = dm_enable_vblank,
+	.disable_vblank = dm_disable_vblank,
 };
 
 static enum drm_connector_status
@@ -2798,7 +2825,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
 		goto fail;
 	}
 
-	stream = dc_create_stream_for_sink(dc_sink);
+	stream = create_stream_for_sink(aconnector, mode, NULL);
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
 		goto fail;
@@ -3058,6 +3085,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
 	if (!dm_plane_state->dc_state)
 		return 0;
 
+	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
+		return -EINVAL;
+
 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
 		return 0;
 
@@ -4630,8 +4660,6 @@ static int dm_update_planes_state(struct dc *dc,
 	bool pflip_needed  = !state->allow_modeset;
 	int ret = 0;
 
-	if (pflip_needed)
-		return ret;
 
 	/* Add new planes */
 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4646,6 +4674,8 @@ static int dm_update_planes_state(struct dc *dc,
 
 		/* Remove any changed/removed planes */
 		if (!enable) {
+			if (pflip_needed)
+				continue;
 
 			if (!old_plane_crtc)
 				continue;
@@ -4677,6 +4707,7 @@ static int dm_update_planes_state(struct dc *dc,
 			*lock_and_validation_needed = true;
 
 		} else { /* Add new planes */
+			struct dc_plane_state *dc_new_plane_state;
 
 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
 				continue;
@@ -4690,38 +4721,50 @@ static int dm_update_planes_state(struct dc *dc,
 			if (!dm_new_crtc_state->stream)
 				continue;
 
+			if (pflip_needed)
+				continue;
 
 			WARN_ON(dm_new_plane_state->dc_state);
 
-			dm_new_plane_state->dc_state = dc_create_plane_state(dc);
-
-			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
-					plane->base.id, new_plane_crtc->base.id);
-
-			if (!dm_new_plane_state->dc_state) {
+			dc_new_plane_state = dc_create_plane_state(dc);
+			if (!dc_new_plane_state) {
 				ret = -EINVAL;
 				return ret;
 			}
 
+			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+					 plane->base.id, new_plane_crtc->base.id);
+
 			ret = fill_plane_attributes(
 				new_plane_crtc->dev->dev_private,
-				dm_new_plane_state->dc_state,
+				dc_new_plane_state,
 				new_plane_state,
 				new_crtc_state);
-			if (ret)
+			if (ret) {
+				dc_plane_state_release(dc_new_plane_state);
 				return ret;
+			}
 
-
+			/*
+			 * Any atomic check errors that occur after this will
+			 * not need a release. The plane state will be attached
+			 * to the stream, and therefore part of the atomic
+			 * state. It'll be released when the atomic state is
+			 * cleaned.
+			 */
 			if (!dc_add_plane_to_context(
 					dc,
 					dm_new_crtc_state->stream,
-					dm_new_plane_state->dc_state,
+					dc_new_plane_state,
 					dm_state->context)) {
 
+				dc_plane_state_release(dc_new_plane_state);
 				ret = -EINVAL;
 				return ret;
 			}
 
+			dm_new_plane_state->dc_state = dc_new_plane_state;
+
 			/* Tell DC to do a full surface update every time there
 			 * is a plane change. Inefficient, but works for now.
 			 */
@@ -4735,6 +4778,30 @@ static int dm_update_planes_state(struct dc *dc,
 	return ret;
 }
 
+static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+					  struct drm_crtc *crtc)
+{
+	struct drm_plane *plane;
+	struct drm_crtc_state *crtc_state;
+
+	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+
+	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+		struct drm_plane_state *plane_state =
+			drm_atomic_get_plane_state(state, plane);
+
+		if (IS_ERR(plane_state))
+			return -EDEADLK;
+
+		crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+		if (crtc->primary == plane && crtc_state->active) {
+			if (!plane_state->fb)
+				return -EINVAL;
+		}
+	}
+	return 0;
+}
+
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
@@ -4758,6 +4825,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		goto fail;
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		ret = dm_atomic_check_plane_state_fb(state, crtc);
+		if (ret)
+			goto fail;
+
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed)
 			continue;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 1874b6cee6af..422055080df4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -683,10 +683,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
 
 void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 {
-	if (adev->mode_info.num_crtc > 0)
-		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
-	else
-		adev->crtc_irq.num_types = 0;
+
+	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
 	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
 
 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f3d87f418d2e..93421dad21bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -189,6 +189,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 		.link = aconnector->dc_link,
 		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 
+	/*
+	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
+	 */
+	if (!aconnector->port || !aconnector->port->aux.ddc.algo)
+		return;
+
 	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
 	if (!edid) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 35e84ed031de..12868c769606 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1358,13 +1358,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
 }
 
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
 {
 
 	if (dc == NULL)
-		return;
+		return false;
 
-	dal_irq_service_set(dc->res_pool->irqs, src, enable);
+	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
 }
 
 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a37428271573..be5546181fa8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1749,8 +1749,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
 			link->link_enc,
 			pipe_ctx->clock_source->id,
 			display_color_depth,
-			pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
-			pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+			pipe_ctx->stream->signal,
 			stream->phy_pix_clk);
 
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 33d91e4474ea..639421a00ab6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1465,7 +1465,7 @@ void decide_link_settings(struct dc_stream_state *stream,
 	/* MST doesn't perform link training for now
 	 * TODO: add MST specific link training routine
 	 */
-	if (is_mst_supported(link)) {
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
 		*link_setting = link->verified_link_cap;
 		return;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 95b8dd0e53c6..4d07ffebfd31 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1360,9 +1360,6 @@ bool dc_is_stream_scaling_unchanged(
 	return true;
 }
 
-/* Maximum TMDS single link pixel clock 165MHz */
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
-
 static void update_stream_engine_usage(
 		struct resource_context *res_ctx,
 		const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 261811e0c094..cd5819789d76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,8 +33,7 @@
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
-static void update_stream_signal(struct dc_stream_state *stream)
+void update_stream_signal(struct dc_stream_state *stream)
 {
 
 	struct dc_sink *dc_sink = stream->sink;
@@ -45,8 +44,9 @@ static void update_stream_signal(struct dc_stream_state *stream)
 	stream->signal = dc_sink->sink_signal;
 
 	if (dc_is_dvi_signal(stream->signal)) {
-		if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
-			stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+		if (stream->ctx->dc->caps.dual_link_dvi &&
+		    stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
+		    stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
 			stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
 		else
 			stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
@@ -193,44 +193,20 @@ bool dc_stream_set_cursor_attributes(
 
 	core_dc = stream->ctx->dc;
 	res_ctx = &core_dc->current_state->res_ctx;
+	stream->cursor_attributes = *attributes;
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 
-		if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+		if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
+		    !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
 			continue;
 		if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
 			continue;
 
 
-		if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
-			pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
-					pipe_ctx->plane_res.ipp, attributes);
-
-		if (pipe_ctx->plane_res.hubp != NULL &&
-				pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.hubp, attributes);
-
-		if (pipe_ctx->plane_res.mi != NULL &&
-				pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.mi, attributes);
-
-
-		if (pipe_ctx->plane_res.xfm != NULL &&
-				pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.xfm, attributes);
-
-		if (pipe_ctx->plane_res.dpp != NULL &&
-				pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.dpp, attributes->color_format);
+		core_dc->hwss.set_cursor_attribute(pipe_ctx);
 	}
-
-	stream->cursor_attributes = *attributes;
-
 	return true;
 }
 
@@ -254,55 +230,21 @@ bool dc_stream_set_cursor_position(
254 230
255 core_dc = stream->ctx->dc; 231 core_dc = stream->ctx->dc;
256 res_ctx = &core_dc->current_state->res_ctx; 232 res_ctx = &core_dc->current_state->res_ctx;
233 stream->cursor_position = *position;
257 234
258 for (i = 0; i < MAX_PIPES; i++) { 235 for (i = 0; i < MAX_PIPES; i++) {
259 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; 236 struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
260 struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
261 struct mem_input *mi = pipe_ctx->plane_res.mi;
262 struct hubp *hubp = pipe_ctx->plane_res.hubp;
263 struct dpp *dpp = pipe_ctx->plane_res.dpp;
264 struct dc_cursor_position pos_cpy = *position;
265 struct dc_cursor_mi_param param = {
266 .pixel_clk_khz = stream->timing.pix_clk_khz,
267 .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
268 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
269 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
270 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
271 };
272 237
273 if (pipe_ctx->stream != stream || 238 if (pipe_ctx->stream != stream ||
274 (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || 239 (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
275 !pipe_ctx->plane_state || 240 !pipe_ctx->plane_state ||
276 (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp)) 241 (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
277 continue; 242 !pipe_ctx->plane_res.ipp)
278
279 if (pipe_ctx->plane_state->address.type
280 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
281 pos_cpy.enable = false;
282
283 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
284 pos_cpy.enable = false;
285
286
287 if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
288 ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
289
290 if (mi != NULL && mi->funcs->set_cursor_position != NULL)
291 mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
292
293 if (!hubp)
294 continue; 243 continue;
295 244
296 if (hubp->funcs->set_cursor_position != NULL) 245 core_dc->hwss.set_cursor_position(pipe_ctx);
297 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
298
299 if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
300 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
301
302 } 246 }
303 247
304 stream->cursor_position = *position;
305
306 return true; 248 return true;
307} 249}
308 250
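The dc_stream.c change above moves the dual-link decision behind the new dc->caps.dual_link_dvi flag and the shared TMDS_MAX_PIXEL_CLOCK limit (defined later in signal_types.h). A standalone sketch of that decision, using stand-in types instead of the real dc structures (model_stream and pick_dvi_signal are illustrative names, not driver API):

#include <stdbool.h>
#include <stdio.h>

#define TMDS_MAX_PIXEL_CLOCK 165000 /* kHz, single-link TMDS limit */

enum signal_type { DVI_SINGLE_LINK, DVI_DUAL_LINK };

/* Stand-in for the caps/stream fields read by update_stream_signal(). */
struct model_stream {
	bool dual_link_dvi;      /* dc->caps.dual_link_dvi */
	int pix_clk_khz;         /* stream->timing.pix_clk_khz */
	bool sink_forces_single; /* sink_signal == SIGNAL_TYPE_DVI_SINGLE_LINK */
};

static enum signal_type pick_dvi_signal(const struct model_stream *s)
{
	if (s->dual_link_dvi &&
	    s->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
	    !s->sink_forces_single)
		return DVI_DUAL_LINK;
	return DVI_SINGLE_LINK;
}

int main(void)
{
	/* 2560x1600@60 needs ~269 MHz, above the 165 MHz single-link limit. */
	struct model_stream s = { .dual_link_dvi = true, .pix_clk_khz = 269000 };

	printf("%s\n", pick_dvi_signal(&s) == DVI_DUAL_LINK ? "dual" : "single");
	return 0;
}

Without the cap, a DVI sink faster than 165 MHz now simply stays single link rather than silently assuming dual-link wiring.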
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e2e3c9df79ea..d6d56611604e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -62,6 +62,7 @@ struct dc_caps {
 	bool dcc_const_color;
 	bool dynamic_audio;
 	bool is_apu;
+	bool dual_link_dvi;
 };
 
 struct dc_dcc_surface_param {
@@ -672,7 +673,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
 		struct dc *dc,
 		uint32_t src_id,
 		uint32_t ext_id);
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
 enum dc_irq_source dc_get_hpd_irq_source_at_index(
 		struct dc *dc, uint32_t link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 01c60f11b2bd..456e4d29eadd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -237,6 +237,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
237 */ 237 */
238struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); 238struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
239 239
240void update_stream_signal(struct dc_stream_state *stream);
241
240void dc_stream_retain(struct dc_stream_state *dc_stream); 242void dc_stream_retain(struct dc_stream_state *dc_stream);
241void dc_stream_release(struct dc_stream_state *dc_stream); 243void dc_stream_release(struct dc_stream_state *dc_stream);
242 244
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index b73db9e78437..a993279a8f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -236,6 +236,7 @@
236 SR(D2VGA_CONTROL), \ 236 SR(D2VGA_CONTROL), \
237 SR(D3VGA_CONTROL), \ 237 SR(D3VGA_CONTROL), \
238 SR(D4VGA_CONTROL), \ 238 SR(D4VGA_CONTROL), \
239 SR(VGA_TEST_CONTROL), \
239 SR(DC_IP_REQUEST_CNTL), \ 240 SR(DC_IP_REQUEST_CNTL), \
240 BL_REG_LIST() 241 BL_REG_LIST()
241 242
@@ -337,6 +338,7 @@ struct dce_hwseq_registers {
337 uint32_t D2VGA_CONTROL; 338 uint32_t D2VGA_CONTROL;
338 uint32_t D3VGA_CONTROL; 339 uint32_t D3VGA_CONTROL;
339 uint32_t D4VGA_CONTROL; 340 uint32_t D4VGA_CONTROL;
341 uint32_t VGA_TEST_CONTROL;
340 /* MMHUB registers. read only. temporary hack */ 342 /* MMHUB registers. read only. temporary hack */
341 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; 343 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
342 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; 344 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -493,6 +495,9 @@ struct dce_hwseq_registers {
493 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \ 495 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
494 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \ 496 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
495 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ 497 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
498 HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
499 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
500 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
496 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ 501 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
497 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) 502 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
498 503
@@ -583,7 +588,10 @@ struct dce_hwseq_registers {
583 type DCFCLK_GATE_DIS; \ 588 type DCFCLK_GATE_DIS; \
584 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ 589 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
585 type DENTIST_DPPCLK_WDIVIDER; \ 590 type DENTIST_DPPCLK_WDIVIDER; \
586 type DENTIST_DISPCLK_WDIVIDER; 591 type DENTIST_DISPCLK_WDIVIDER; \
592 type VGA_TEST_ENABLE; \
593 type VGA_TEST_RENDER_START; \
594 type D1VGA_MODE_ENABLE;
587 595
588struct dce_hwseq_shift { 596struct dce_hwseq_shift {
589 HWSEQ_REG_FIELD_LIST(uint8_t) 597 HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index a266e3f5e75f..e4741f1a2b01 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -82,13 +82,6 @@
 #define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
 #define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
 
-/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
-#define TMDS_MIN_PIXEL_CLOCK 25000
-/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
-#define TMDS_MAX_PIXEL_CLOCK 165000
-/* For current ASICs pixel clock - 600MHz */
-#define MAX_ENCODER_CLOCK 600000
-
 enum {
 	DP_MST_UPDATE_MAX_RETRY = 50
 };
@@ -683,6 +676,7 @@ void dce110_link_encoder_construct(
 {
 	struct bp_encoder_cap_info bp_cap_info = {0};
 	const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+	enum bp_result result = BP_RESULT_OK;
 
 	enc110->base.funcs = &dce110_lnk_enc_funcs;
 	enc110->base.ctx = init_data->ctx;
@@ -757,15 +751,24 @@ void dce110_link_encoder_construct(
 		enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
 	}
 
+	/* default to one to mirror Windows behavior */
+	enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+	result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
+						enc110->base.id, &bp_cap_info);
+
 	/* Override features with DCE-specific values */
-	if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
-			enc110->base.ctx->dc_bios, enc110->base.id,
-			&bp_cap_info)) {
+	if (BP_RESULT_OK == result) {
 		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
 				bp_cap_info.DP_HBR2_EN;
 		enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
 				bp_cap_info.DP_HBR3_EN;
 		enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+	} else {
+		dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
+				"%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+				__func__,
+				result);
 	}
 }
 
@@ -904,8 +907,7 @@ void dce110_link_encoder_enable_tmds_output(
 	struct link_encoder *enc,
 	enum clock_source_id clock_source,
 	enum dc_color_depth color_depth,
-	bool hdmi,
-	bool dual_link,
+	enum signal_type signal,
 	uint32_t pixel_clock)
 {
 	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
@@ -919,16 +921,12 @@ void dce110_link_encoder_enable_tmds_output(
 	cntl.engine_id = enc->preferred_engine;
 	cntl.transmitter = enc110->base.transmitter;
 	cntl.pll_id = clock_source;
-	if (hdmi) {
-		cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
-		cntl.lanes_number = 4;
-	} else if (dual_link) {
-		cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+	cntl.signal = signal;
+	if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
 		cntl.lanes_number = 8;
-	} else {
-		cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	else
 		cntl.lanes_number = 4;
-	}
+
 	cntl.hpd_sel = enc110->base.hpd_source;
 
 	cntl.pixel_clock = pixel_clock;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 8ca9afe47a2b..0ec3433d34b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output(
210 struct link_encoder *enc, 210 struct link_encoder *enc,
211 enum clock_source_id clock_source, 211 enum clock_source_id clock_source,
212 enum dc_color_depth color_depth, 212 enum dc_color_depth color_depth,
213 bool hdmi, 213 enum signal_type signal,
214 bool dual_link,
215 uint32_t pixel_clock); 214 uint32_t pixel_clock);
216 215
217/* enables DP PHY output */ 216/* enables DP PHY output */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3ea43e2a9450..442dd2d93618 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -852,6 +852,7 @@ static bool construct(
852 dc->caps.max_downscale_ratio = 200; 852 dc->caps.max_downscale_ratio = 200;
853 dc->caps.i2c_speed_in_khz = 40; 853 dc->caps.i2c_speed_in_khz = 40;
854 dc->caps.max_cursor_size = 128; 854 dc->caps.max_cursor_size = 128;
855 dc->caps.dual_link_dvi = true;
855 856
856 for (i = 0; i < pool->base.pipe_count; i++) { 857 for (i = 0; i < pool->base.pipe_count; i++) {
857 pool->base.timing_generators[i] = 858 pool->base.timing_generators[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 86cdd7b4811f..6f382a3ac90f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -688,15 +688,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
 	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
 	struct dc_link *link = pipe_ctx->stream->sink->link;
 
-	/* 1. update AVI info frame (HDMI, DP)
-	 * we always need to update info frame
-	 */
+
 	uint32_t active_total_with_borders;
 	uint32_t early_control = 0;
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
 
-	/* TODOFPGA may change to hwss.update_info_frame */
+	/* For MST, there are multiply stream go to only one link.
+	 * connect DIG back_end to front_end while enable_stream and
+	 * disconnect them during disable_stream
+	 * BY this, it is logic clean to separate stream and link */
+	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+			pipe_ctx->stream_res.stream_enc->id, true);
+
+	/* update AVI info frame (HDMI, DP)*/
+	/* TODO: FPGA may change to hwss.update_info_frame */
 	dce110_update_info_frame(pipe_ctx);
+
 	/* enable early control to avoid corruption on DP monitor*/
 	active_total_with_borders =
 			timing->h_addressable
@@ -717,12 +724,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
 		pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
 	}
 
-	/* For MST, there are multiply stream go to only one link.
-	 * connect DIG back_end to front_end while enable_stream and
-	 * disconnect them during disable_stream
-	 * BY this, it is logic clean to separate stream and link */
-	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
-			pipe_ctx->stream_res.stream_enc->id, true);
+
+
 
 }
 
@@ -1690,9 +1693,13 @@ static void apply_min_clocks(
  * Check if FBC can be enabled
  */
 static bool should_enable_fbc(struct dc *dc,
-			      struct dc_state *context)
+			      struct dc_state *context,
+			      uint32_t *pipe_idx)
 {
-	struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
+	uint32_t i;
+	struct pipe_ctx *pipe_ctx = NULL;
+	struct resource_context *res_ctx = &context->res_ctx;
+
 
 	ASSERT(dc->fbc_compressor);
 
@@ -1704,6 +1711,14 @@ static bool should_enable_fbc(struct dc *dc,
 	if (context->stream_count != 1)
 		return false;
 
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		if (res_ctx->pipe_ctx[i].stream) {
+			pipe_ctx = &res_ctx->pipe_ctx[i];
+			*pipe_idx = i;
+			break;
+		}
+	}
+
 	/* Only supports eDP */
 	if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
 		return false;
@@ -1729,11 +1744,14 @@
 static void enable_fbc(struct dc *dc,
 		       struct dc_state *context)
 {
-	if (should_enable_fbc(dc, context)) {
+	uint32_t pipe_idx = 0;
+
+	if (should_enable_fbc(dc, context, &pipe_idx)) {
 		/* Program GRPH COMPRESSED ADDRESS and PITCH */
 		struct compr_addr_and_pitch_params params = {0, 0, 0};
 		struct compressor *compr = dc->fbc_compressor;
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
 
 		params.source_view_width = pipe_ctx->stream->timing.h_addressable;
 		params.source_view_height = pipe_ctx->stream->timing.v_addressable;
@@ -2915,6 +2933,49 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
 	}
 }
 
+void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+	struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+	struct mem_input *mi = pipe_ctx->plane_res.mi;
+	struct dc_cursor_mi_param param = {
+		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+		.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+		.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+	};
+
+	if (pipe_ctx->plane_state->address.type
+			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+		pos_cpy.enable = false;
+
+	if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+		pos_cpy.enable = false;
+
+	if (ipp->funcs->ipp_cursor_set_position)
+		ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
+	if (mi->funcs->set_cursor_position)
+		mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
+}
+
+void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+	if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
+		pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
+				pipe_ctx->plane_res.ipp, attributes);
+
+	if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
+		pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
+				pipe_ctx->plane_res.mi, attributes);
+
+	if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
+		pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
+				pipe_ctx->plane_res.xfm, attributes);
+}
+
 static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
 
 static void optimize_shared_resources(struct dc *dc) {}
@@ -2957,6 +3018,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.edp_backlight_control = hwss_edp_backlight_control,
 	.edp_power_control = hwss_edp_power_control,
 	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+	.set_cursor_position = dce110_set_cursor_position,
+	.set_cursor_attribute = dce110_set_cursor_attribute
 };
 
 void dce110_hw_sequencer_construct(struct dc *dc)
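With dce110_set_cursor_position/attribute registered in dce110_funcs, cursor programming becomes one indirect call per pipe instead of core code poking MI/IPP/HUBP/DPP directly. A minimal sketch of that function-pointer dispatch, with hypothetical names standing in for hw_sequencer_funcs and the real hooks:

#include <stdio.h>

struct pipe_ctx { int id; };

/* Hypothetical per-ASIC hook table mirroring hw_sequencer_funcs. */
struct hwss_funcs {
	void (*set_cursor_position)(struct pipe_ctx *pipe);
	void (*set_cursor_attribute)(struct pipe_ctx *pipe);
};

static void dce_cursor_position(struct pipe_ctx *pipe)
{
	printf("DCE path: program MI/IPP cursor on pipe %d\n", pipe->id);
}

static void dcn_cursor_position(struct pipe_ctx *pipe)
{
	printf("DCN path: program HUBP/DPP cursor on pipe %d\n", pipe->id);
}

static const struct hwss_funcs dce_hwss = { .set_cursor_position = dce_cursor_position };
static const struct hwss_funcs dcn_hwss = { .set_cursor_position = dcn_cursor_position };

/* Core code stays generation-agnostic: it only ever sees the table. */
static void stream_set_cursor(const struct hwss_funcs *hwss, struct pipe_ctx *pipe)
{
	hwss->set_cursor_position(pipe);
}

int main(void)
{
	struct pipe_ctx pipe = { .id = 0 };

	stream_set_cursor(&dce_hwss, &pipe);
	stream_set_cursor(&dcn_hwss, &pipe);
	return 0;
}

The design win is that dc_stream_set_cursor_position() no longer needs to know which plane resources exist on a given ASIC; each generation validates and programs only its own blocks.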
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7c4779578fb7..00f18c485e1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(
 	return result;
 }
 
+enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+				     struct dc_caps *caps)
+{
+	if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
+	    ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
+		return DC_FAIL_SURFACE_VALIDATE;
+
+	return DC_OK;
+}
+
 static bool dce110_validate_surface_sets(
 		struct dc_state *context)
 {
@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(
 					plane->src_rect.height > 1080))
 				return false;
 
+			/* we don't have the logic to support underlay
+			 * only yet so block the use case where we get
+			 * NV12 plane as top layer
+			 */
+			if (j == 0)
+				return false;
+
 			/* irrespective of plane format,
 			 * stream should be RGB encoded
 			 */
@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
 	.link_enc_create = dce110_link_encoder_create,
 	.validate_guaranteed = dce110_validate_guaranteed,
 	.validate_bandwidth = dce110_validate_bandwidth,
+	.validate_plane = dce110_validate_plane,
 	.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
 	.add_stream_to_ctx = dce110_add_stream_to_ctx,
 	.validate_global = dce110_validate_global
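dce110_validate_plane() above caps downscaling at 2x per axis: a 3840x2160 source into a 1920x1080 destination passes (exactly 2x), while 1600x900 fails because 1600 * 2 = 3200 < 3840. A quick standalone check of that arithmetic (downscale_ok is an illustrative helper, not driver API):

#include <stdbool.h>
#include <stdio.h>

struct rect { int width, height; };

/* Mirrors the 2x downscale limit in dce110_validate_plane(). */
static bool downscale_ok(struct rect src, struct rect dst)
{
	return (dst.width * 2) >= src.width &&
	       (dst.height * 2) >= src.height;
}

int main(void)
{
	struct rect src = { 3840, 2160 };
	struct rect ok_dst = { 1920, 1080 };  /* exactly 2x: allowed */
	struct rect bad_dst = { 1600, 900 };  /* 2.4x: rejected */

	printf("%d %d\n", downscale_ok(src, ok_dst), downscale_ok(src, bad_dst));
	return 0;
}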
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 663e0a047a4b..98d9cd0109e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -1103,6 +1103,8 @@ static bool construct(
 	dc->caps.max_downscale_ratio = 200;
 	dc->caps.i2c_speed_in_khz = 100;
 	dc->caps.max_cursor_size = 128;
+	dc->caps.dual_link_dvi = true;
+
 
 	/*************************************************
 	 *  Create resources                             *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 57cd67359567..5aab01db28ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -835,6 +835,8 @@ static bool construct(
 	dc->caps.max_downscale_ratio = 200;
 	dc->caps.i2c_speed_in_khz = 100;
 	dc->caps.max_cursor_size = 128;
+	dc->caps.dual_link_dvi = true;
+
 	dc->debug = debug_defaults;
 
 	/*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 8f2bd56f3461..25d7eb1567ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -793,6 +793,7 @@ static bool dce80_construct(
 	dc->caps.max_downscale_ratio = 200;
 	dc->caps.i2c_speed_in_khz = 40;
 	dc->caps.max_cursor_size = 128;
+	dc->caps.dual_link_dvi = true;
 
 	/*************************************************
 	 *  Create resources                             *
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 82572863acab..072e4485e85e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -238,10 +238,24 @@ static void enable_power_gating_plane(
 static void disable_vga(
 	struct dce_hwseq *hws)
 {
+	unsigned int in_vga_mode = 0;
+
+	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode);
+
+	if (in_vga_mode == 0)
+		return;
+
 	REG_WRITE(D1VGA_CONTROL, 0);
-	REG_WRITE(D2VGA_CONTROL, 0);
-	REG_WRITE(D3VGA_CONTROL, 0);
-	REG_WRITE(D4VGA_CONTROL, 0);
+
+	/* HW Engineer's Notes:
+	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
+	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
+	 *
+	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
+	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
+	 */
+	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
+	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
 }
 
 static void dpp_pg_control(
@@ -1761,6 +1775,11 @@ static void update_dchubp_dpp(
 				&pipe_ctx->plane_res.scl_data.viewport_c);
 	}
 
+	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+		dc->hwss.set_cursor_position(pipe_ctx);
+		dc->hwss.set_cursor_attribute(pipe_ctx);
+	}
+
 	if (plane_state->update_flags.bits.full_update) {
 		/*gamut remap*/
 		program_gamut_remap(pipe_ctx);
@@ -2296,7 +2315,7 @@ static bool dcn10_dummy_display_power_gating(
 	return true;
 }
 
-void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 {
 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2316,12 +2335,46 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 	}
 }
 
-void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
+static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
 {
 	if (hws->ctx->dc->res_pool->hubbub != NULL)
 		hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
 }
 
+static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
+	struct hubp *hubp = pipe_ctx->plane_res.hubp;
+	struct dpp *dpp = pipe_ctx->plane_res.dpp;
+	struct dc_cursor_mi_param param = {
+		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
+		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+		.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+		.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+	};
+
+	if (pipe_ctx->plane_state->address.type
+			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+		pos_cpy.enable = false;
+
+	if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+		pos_cpy.enable = false;
+
+	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
+}
+
+static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+			pipe_ctx->plane_res.hubp, attributes);
+	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+			pipe_ctx->plane_res.dpp, attributes->color_format);
+}
+
 static const struct hw_sequencer_funcs dcn10_funcs = {
 	.program_gamut_remap = program_gamut_remap,
 	.program_csc_matrix = program_csc_matrix,
@@ -2362,6 +2415,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.edp_backlight_control = hwss_edp_backlight_control,
 	.edp_power_control = hwss_edp_power_control,
 	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
+	.set_cursor_position = dcn10_set_cursor_position,
+	.set_cursor_attribute = dcn10_set_cursor_attribute
 };
 
 
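Per the HW engineer's notes in disable_vga() above, DCN10 now exits VGA with an early-out plus a test-render handshake instead of blindly clearing all four D*VGA_CONTROL registers. A toy model of that control flow; the register variables and bit values here are invented stand-ins for REG_GET/REG_UPDATE and the real field encodings:

#include <stdio.h>

/* Fake register file standing in for REG_GET/REG_WRITE/REG_UPDATE. */
static unsigned int d1vga_control = 1; /* pretend VBIOS left VGA enabled */
static unsigned int vga_test_control;

static void disable_vga_model(void)
{
	if (d1vga_control == 0)
		return; /* not in VGA mode: nothing to hand off */

	d1vga_control = 0;
	/* Pulse the test-render handshake so DCHUBP timing is reprogrammed;
	 * the vBIOS later polls RENDER_DONE and clears the enable bit. */
	vga_test_control |= 0x1; /* hypothetical VGA_TEST_ENABLE bit */
	vga_test_control |= 0x2; /* hypothetical VGA_TEST_RENDER_START bit */
}

int main(void)
{
	disable_vga_model();
	printf("D1VGA_CONTROL=%u VGA_TEST_CONTROL=0x%x\n",
	       d1vga_control, vga_test_control);
	return 0;
}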
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 0fd329deacd8..54d8a1386142 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -123,8 +123,7 @@ struct link_encoder_funcs {
123 void (*enable_tmds_output)(struct link_encoder *enc, 123 void (*enable_tmds_output)(struct link_encoder *enc,
124 enum clock_source_id clock_source, 124 enum clock_source_id clock_source,
125 enum dc_color_depth color_depth, 125 enum dc_color_depth color_depth,
126 bool hdmi, 126 enum signal_type signal,
127 bool dual_link,
128 uint32_t pixel_clock); 127 uint32_t pixel_clock);
129 void (*enable_dp_output)(struct link_encoder *enc, 128 void (*enable_dp_output)(struct link_encoder *enc,
130 const struct dc_link_settings *link_settings, 129 const struct dc_link_settings *link_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 4c0aa56f7bae..379c6ecd271a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -198,6 +198,9 @@ struct hw_sequencer_funcs {
 			bool enable);
 	void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
 
+	void (*set_cursor_position)(struct pipe_ctx *pipe);
+	void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+
 };
 
 void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index f7e40b292dfb..d3e1923b01a8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -217,7 +217,7 @@ bool dce110_vblank_set(
 			core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
 
 	if (enable) {
-		if (!tg->funcs->arm_vert_intr(tg, 2)) {
+		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
 			DC_ERROR("Failed to get VBLANK!\n");
 			return false;
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 57a54a7b89e5..1c079ba37c30 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -42,8 +42,7 @@ static void virtual_link_encoder_enable_tmds_output(
42 struct link_encoder *enc, 42 struct link_encoder *enc,
43 enum clock_source_id clock_source, 43 enum clock_source_id clock_source,
44 enum dc_color_depth color_depth, 44 enum dc_color_depth color_depth,
45 bool hdmi, 45 enum signal_type signal,
46 bool dual_link,
47 uint32_t pixel_clock) {} 46 uint32_t pixel_clock) {}
48 47
49static void virtual_link_encoder_enable_dp_output( 48static void virtual_link_encoder_enable_dp_output(
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 7a9b43f84a31..36bbad594267 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -419,11 +419,6 @@ struct bios_event_info {
 	bool backlight_changed;
 };
 
-enum {
-	HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
-	TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
-};
-
 /*
  * DFS-bypass flag
  */
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index b5ebde642207..199c5db67cbc 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -26,6 +26,11 @@
 #ifndef __DC_SIGNAL_TYPES_H__
 #define __DC_SIGNAL_TYPES_H__
 
+/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
+#define TMDS_MIN_PIXEL_CLOCK 25000
+/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
+#define TMDS_MAX_PIXEL_CLOCK 165000
+
 enum signal_type {
 	SIGNAL_TYPE_NONE = 0L,	/* no signal */
 	SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
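Moving TMDS_MIN/MAX_PIXEL_CLOCK into signal_types.h lets update_stream_signal() and the link encoders share one definition of the 165 MHz single-link ceiling. A quick sanity check of the threshold in kHz (standalone program, not driver code):

#include <stdio.h>

#define TMDS_MIN_PIXEL_CLOCK 25000  /* 25.00 MHz */
#define TMDS_MAX_PIXEL_CLOCK 165000 /* 165.00 MHz, single-link limit */

int main(void)
{
	/* 1080p60 (~148.5 MHz) fits single link; 4:4:4 4K30 (297 MHz) does not. */
	int clocks[] = { 148500, 297000 };

	for (int i = 0; i < 2; i++)
		printf("%d kHz -> %s\n", clocks[i],
		       clocks[i] > TMDS_MAX_PIXEL_CLOCK ? "dual link" : "single link");
	return 0;
}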
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 41e42beff213..08e8a793714f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2756,10 +2756,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 					PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-	disable_mclk_switching = ((1 < info.display_count) ||
-				  disable_mclk_switching_for_frame_lock ||
-				  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
-				  (mode_info.refresh_rate > 120));
+	if (info.display_count == 0)
+		disable_mclk_switching = false;
+	else
+		disable_mclk_switching = ((1 < info.display_count) ||
+					  disable_mclk_switching_for_frame_lock ||
+					  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+					  (mode_info.refresh_rate > 120));
 
 	sclk = smu7_ps->performance_levels[0].engine_clock;
 	mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -4534,13 +4537,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
 	int tmp_result, result = 0;
 	uint32_t sclk_mask = 0, mclk_mask = 0;
 
-	if (hwmgr->chip_id == CHIP_FIJI) {
-		if (request->type == AMD_PP_GFX_PROFILE)
-			smu7_enable_power_containment(hwmgr);
-		else if (request->type == AMD_PP_COMPUTE_PROFILE)
-			smu7_disable_power_containment(hwmgr);
-	}
-
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 2d55dabc77d4..5f9c3efb532f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3168,10 +3168,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
 	force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
 
-	disable_mclk_switching = (info.display_count > 1) ||
-				 disable_mclk_switching_for_frame_lock ||
-				 disable_mclk_switching_for_vr ||
-				 force_mclk_high;
+	if (info.display_count == 0)
+		disable_mclk_switching = false;
+	else
+		disable_mclk_switching = (info.display_count > 1) ||
+					 disable_mclk_switching_for_frame_lock ||
+					 disable_mclk_switching_for_vr ||
+					 force_mclk_high;
 
 	sclk = vega10_ps->performance_levels[0].gfx_clock;
 	mclk = vega10_ps->performance_levels[0].mem_clock;
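Both the smu7 and vega10 hunks apply the same fix: with zero active displays the OR-chain could still disable memory-clock switching (for instance via frame lock), so the display_count == 0 case now short-circuits to false. A condensed model of the corrected decision (illustrative function, not the hwmgr API):

#include <stdbool.h>
#include <stdio.h>

static bool disable_mclk_switching(int display_count, bool frame_lock,
				   bool vblank_too_short, int refresh_rate)
{
	if (display_count == 0)
		return false; /* nothing on screen: switching is always safe */

	return display_count > 1 || frame_lock ||
	       vblank_too_short || refresh_rate > 120;
}

int main(void)
{
	/* Headless: mclk switching stays enabled even with frame lock set. */
	printf("%d\n", disable_mclk_switching(0, true, false, 60));
	/* Single 144 Hz display: switching disabled by the refresh check. */
	printf("%d\n", disable_mclk_switching(1, false, false, 144));
	return 0;
}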
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 5a13ff29f4f0..c0530a1af5e3 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -121,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev,
 	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
 	r.handles[0] = or->handle;
 
+	if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
+	    dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
+		r.pixel_format = DRM_FORMAT_XBGR2101010;
+
 	ret = drm_mode_addfb2(dev, &r, file_priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index c8454ac43fae..db6b94dda5df 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -471,6 +471,7 @@ struct parser_exec_state {
 	 * used when ret from 2nd level batch buffer
 	 */
 	int saved_buf_addr_type;
+	bool is_ctx_wa;
 
 	struct cmd_info *info;
 
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	bb->accessing = true;
 	bb->bb_start_cmd_va = s->ip_va;
 
+	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
+		bb->bb_offset = s->ip_va - s->rb_va;
+	else
+		bb->bb_offset = 0;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	s.ring_tail = gma_tail;
 	s.rb_va = workload->shadow_ring_buffer_va;
 	s.workload = workload;
+	s.is_ctx_wa = false;
 
 	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
 		gma_head == gma_tail)
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.ring_tail = gma_tail;
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
 	s.workload = workload;
+	s.is_ctx_wa = true;
 
 	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 256f1bb522b7..152df3d0291e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -394,9 +394,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * performace for batch mmio read/write, so we need
 	 * handle forcewake mannually.
 	 */
+	intel_runtime_pm_get(dev_priv);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 	switch_mmio(pre, next, ring_id);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_runtime_pm_put(dev_priv);
 }
 
 /**
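The mmio_context.c hunk brackets the manual forcewake sequence with intel_runtime_pm_get()/put() so the device holds a wakeref for the whole MMIO batch. The shape of that nesting, sketched with counters standing in for the i915 helpers (all names here are stand-ins):

#include <stdio.h>

/* Stand-ins for intel_runtime_pm_get/put and forcewake get/put. */
static int pm_refcount, fw_refcount;

static void runtime_pm_get(void)  { pm_refcount++; }
static void runtime_pm_put(void)  { pm_refcount--; }
static void forcewake_get(void)   { fw_refcount++; }
static void forcewake_put(void)   { fw_refcount--; }

static void switch_mmio_model(void)
{
	printf("batch mmio read/write (pm=%d fw=%d)\n", pm_refcount, fw_refcount);
}

int main(void)
{
	/* Hold the PM reference across the full forcewake window; release LIFO. */
	runtime_pm_get();
	forcewake_get();
	switch_mmio_model();
	forcewake_put();
	runtime_pm_put();
	return 0;
}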
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b55b3580ca1d..d74d6f05c62c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+/*
+ * when populating shadow ctx from guest, we should not overrride oa related
+ * registers, so that they will not be overlapped by guest oa configs. Thus
+ * made it possible to capture oa data from host for both host and guests.
+ */
+static void sr_oa_regs(struct intel_vgpu_workload *workload,
+		u32 *reg_state, bool save)
+{
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	int i = 0;
+	u32 flex_mmio[] = {
+		i915_mmio_reg_offset(EU_PERF_CNTL0),
+		i915_mmio_reg_offset(EU_PERF_CNTL1),
+		i915_mmio_reg_offset(EU_PERF_CNTL2),
+		i915_mmio_reg_offset(EU_PERF_CNTL3),
+		i915_mmio_reg_offset(EU_PERF_CNTL4),
+		i915_mmio_reg_offset(EU_PERF_CNTL5),
+		i915_mmio_reg_offset(EU_PERF_CNTL6),
+	};
+
+	if (!workload || !reg_state || workload->ring_id != RCS)
+		return;
+
+	if (save) {
+		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+
+			workload->flex_mmio[i] = reg_state[state_offset + 1];
+		}
+	} else {
+		reg_state[ctx_oactxctrl] =
+			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
+		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+			u32 mmio = flex_mmio[i];
+
+			reg_state[state_offset] = mmio;
+			reg_state[state_offset + 1] = workload->flex_mmio[i];
+		}
+	}
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	shadow_ring_context = kmap(page);
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
@@ -122,6 +171,7 @@
 			sizeof(*shadow_ring_context),
 			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 	kunmap(page);
 	return 0;
 }
@@ -376,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			goto err;
 		}
 
+		/* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
+		 * is only updated into ring_scan_buffer, not real ring address
+		 * allocated in later copy_workload_to_ring_buffer. pls be noted
+		 * shadow_ring_buffer_va is now pointed to real ring buffer va
+		 * in copy_workload_to_ring_buffer.
+		 */
+
+		if (bb->bb_offset)
+			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+				+ bb->bb_offset;
+
 		/* relocate shadow batch buffer */
 		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
 		if (gmadr_bytes == 8)
@@ -1044,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
-	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
+	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
 			sizeof(struct intel_vgpu_workload), 0,
 			SLAB_HWCACHE_ALIGN,
-			NULL);
+			offsetof(struct intel_vgpu_workload, rb_tail),
+			sizeof_field(struct intel_vgpu_workload, rb_tail),
+			NULL);
 
 	if (!s->workloads) {
 		ret = -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ff175a98b19e..a79a4f60637e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -110,6 +110,10 @@ struct intel_vgpu_workload {
 	/* shadow batch buffer */
 	struct list_head shadow_bb;
 	struct intel_shadow_wa_ctx wa_ctx;
+
+	/* oa registers */
+	u32 oactxctrl;
+	u32 flex_mmio[7];
 };
 
 struct intel_vgpu_shadow_bb {
@@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb {
 	u32 *bb_start_cmd_va;
 	unsigned int clflush;
 	bool accessing;
+	unsigned long bb_offset;
 };
 
 #define workload_q_head(vgpu, ring_id) \
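The kmem_cache_create_usercopy() switch in scheduler.c pairs with these struct additions: under CONFIG_HARDENED_USERCOPY only the declared window of each slab object (here, rb_tail) may be copied to or from userspace. A kernel-style sketch of the pattern (struct foo and foo_cache are placeholder names; this is not a standalone program):

#include <linux/slab.h>
#include <linux/stddef.h>

struct foo {
	unsigned long private_state; /* never copied to/from userspace */
	char user_visible[64];       /* the only permitted user-copy window */
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	/* useroffset/usersize bound the region hardened usercopy will allow. */
	foo_cache = kmem_cache_create_usercopy("foo",
			sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
			offsetof(struct foo, user_visible),
			sizeof_field(struct foo, user_visible),
			NULL);
	return foo_cache ? 0 : -ENOMEM;
}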
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dd89abd2263d..6ff5d655c202 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -434,20 +434,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 
+		/*
+		 * If both shared fences and an exclusive fence exist,
+		 * then by construction the shared fences must be later
+		 * than the exclusive fence. If we successfully wait for
+		 * all the shared fences, we know that the exclusive fence
+		 * must all be signaled. If all the shared fences are
+		 * signaled, we can prune the array and recover the
+		 * floating references on the fences/requests.
+		 */
 		prune_fences = count && timeout >= 0;
 	} else {
 		excl = reservation_object_get_excl_rcu(resv);
 	}
 
-	if (excl && timeout >= 0) {
+	if (excl && timeout >= 0)
 		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
 						     rps_client);
-		prune_fences = timeout >= 0;
-	}
 
 	dma_fence_put(excl);
 
-	/* Oportunistically prune the fences iff we know they have *all* been
+	/*
+	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled and that the reservation object has not been changed (i.e.
 	 * no new fences have been added).
 	 */
@@ -3205,8 +3213,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
 	 * for which we haven't set the fence error to EIO yet).
 	 */
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, i915, id) {
+		i915_gem_reset_prepare_engine(engine);
 		engine->submit_request = nop_submit_request;
+	}
 
 	/*
 	 * Make sure no one is running the old callback before we proceed with
@@ -3244,6 +3254,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 		intel_engine_init_global_seqno(engine,
 					       intel_engine_last_submit(engine));
 		spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+		i915_gem_reset_finish_engine(engine);
 	}
 
 	set_bit(I915_WEDGED, &i915->gpu_error.flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4401068ff468..3ab1ace2a6bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
 		list_add_tail(&vma->exec_link, &eb->unbound);
 		if (drm_mm_node_allocated(&vma->node))
 			err = i915_vma_unbind(vma);
+		if (unlikely(err))
+			vma->exec_flags = NULL;
 	}
 	return err;
 }
@@ -2410,7 +2412,7 @@ err_request:
 	if (out_fence) {
 		if (err == 0) {
 			fd_install(out_fence_fd, out_fence->file);
-			args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
+			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
 			args->rsvd2 |= (u64)out_fence_fd << 32;
 			out_fence_fd = -1;
 		} else {
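The execbuffer fix is argument order: GENMASK_ULL(h, l) takes the high bit first, so GENMASK_ULL(0, 31) did not produce the intended low-32-bit mask and the in-fence bits were lost. A standalone check using a construction equivalent to the kernel macro for h >= l:

#include <stdint.h>
#include <stdio.h>

/* Equivalent to the kernel's GENMASK_ULL(h, l) for h >= l. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t rsvd2 = 0xdeadbeef00000003ULL;

	/* Keep the in-fence fd in the low word, then stash out-fence above. */
	rsvd2 &= GENMASK_ULL(31, 0);
	rsvd2 |= (uint64_t)42 << 32;
	printf("0x%016llx\n", (unsigned long long)rsvd2); /* 0x0000002a00000003 */
	return 0;
}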
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e09d18df8b7f..a3e93d46316a 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -476,8 +476,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline->lock);
 
-	trace_i915_gem_request_execute(request);
-
 	/* Transfer from per-context onto the global per-engine timeline */
 	timeline = engine->timeline;
 	GEM_BUG_ON(timeline == request->timeline);
@@ -501,6 +499,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
 	list_move_tail(&request->link, &timeline->requests);
 	spin_unlock(&request->timeline->lock);
 
+	trace_i915_gem_request_execute(request);
+
 	wake_up_all(&request->execute);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0be50e43507d..f8fe5ffcdcff 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1303 */ 1303 */
1304 mutex_lock(&dev_priv->drm.struct_mutex); 1304 mutex_lock(&dev_priv->drm.struct_mutex);
1305 dev_priv->perf.oa.exclusive_stream = NULL; 1305 dev_priv->perf.oa.exclusive_stream = NULL;
1306 mutex_unlock(&dev_priv->drm.struct_mutex);
1307
1308 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 1306 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
1307 mutex_unlock(&dev_priv->drm.struct_mutex);
1309 1308
1310 free_oa_buffer(dev_priv); 1309 free_oa_buffer(dev_priv);
1311 1310
@@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
1756 * Note: it's only the RCS/Render context that has any OA state. 1755 * Note: it's only the RCS/Render context that has any OA state.
1757 */ 1756 */
1758static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, 1757static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1759 const struct i915_oa_config *oa_config, 1758 const struct i915_oa_config *oa_config)
1760 bool interruptible)
1761{ 1759{
1762 struct i915_gem_context *ctx; 1760 struct i915_gem_context *ctx;
1763 int ret; 1761 int ret;
1764 unsigned int wait_flags = I915_WAIT_LOCKED; 1762 unsigned int wait_flags = I915_WAIT_LOCKED;
1765 1763
1766 if (interruptible) { 1764 lockdep_assert_held(&dev_priv->drm.struct_mutex);
1767 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
1768 if (ret)
1769 return ret;
1770
1771 wait_flags |= I915_WAIT_INTERRUPTIBLE;
1772 } else {
1773 mutex_lock(&dev_priv->drm.struct_mutex);
1774 }
1775 1765
1776 /* Switch away from any user context. */ 1766 /* Switch away from any user context. */
1777 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); 1767 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
@@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1819 } 1809 }
1820 1810
1821 out: 1811 out:
1822 mutex_unlock(&dev_priv->drm.struct_mutex);
1823
1824 return ret; 1812 return ret;
1825} 1813}
1826 1814
@@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1863 * to make sure all slices/subslices are ON before writing to NOA 1851 * to make sure all slices/subslices are ON before writing to NOA
1864 * registers. 1852 * registers.
1865 */ 1853 */
1866 ret = gen8_configure_all_contexts(dev_priv, oa_config, true); 1854 ret = gen8_configure_all_contexts(dev_priv, oa_config);
1867 if (ret) 1855 if (ret)
1868 return ret; 1856 return ret;
1869 1857
@@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1878static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) 1866static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1879{ 1867{
1880 /* Reset all contexts' slices/subslices configurations. */ 1868 /* Reset all contexts' slices/subslices configurations. */
1881 gen8_configure_all_contexts(dev_priv, NULL, false); 1869 gen8_configure_all_contexts(dev_priv, NULL);
1882 1870
1883 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & 1871 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1884 ~GT_NOA_ENABLE)); 1872 ~GT_NOA_ENABLE));
@@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
 static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
 {
 	/* Reset all contexts' slices/subslices configurations. */
-	gen8_configure_all_contexts(dev_priv, NULL, false);
+	gen8_configure_all_contexts(dev_priv, NULL);
 
 	/* Make sure we disable noa to save power. */
 	I915_WRITE(RPM_CONFIG1,
@@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 	if (ret)
 		goto err_oa_buf_alloc;
 
+	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+	if (ret)
+		goto err_lock;
+
 	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
 						      stream->oa_config);
 	if (ret)
@@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 
 	stream->ops = &i915_oa_stream_ops;
 
-	/* Lock device for exclusive_stream access late because
-	 * enable_metric_set() might lock as well on gen8+.
-	 */
-	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-	if (ret)
-		goto err_lock;
-
 	dev_priv->perf.oa.exclusive_stream = stream;
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	return 0;
 
-err_lock:
+err_enable:
 	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-err_enable:
+err_lock:
 	free_oa_buffer(dev_priv);
 
 err_oa_buf_alloc:
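
Note: the i915_perf hunks above move the lock acquisition ahead of enable_metric_set() and reorder the error labels so teardown mirrors setup. A minimal userspace sketch of that unwinding pattern, with all names hypothetical:

#include <stdio.h>

static int acquire_lock(void)  { return 0; }   /* pretend these can fail */
static void release_lock(void) { puts("unlock"); }
static int alloc_buffer(void)  { return 0; }
static void free_buffer(void)  { puts("free buffer"); }
static int enable_hw(void)     { return -1; }  /* simulate a failure */
static void disable_hw(void)   { puts("disable hw"); }

static int stream_init(void)
{
	int ret;

	ret = alloc_buffer();
	if (ret)
		goto err_alloc;

	ret = acquire_lock();
	if (ret)
		goto err_lock;

	ret = enable_hw();
	if (ret)
		goto err_enable;

	release_lock();
	return 0;

err_enable:
	disable_hw();		/* undo in reverse acquisition order */
	release_lock();
err_lock:
	free_buffer();
err_alloc:
	return ret;
}

int main(void) { return stream_init() ? 1 : 0; }

The point of the reorder is exactly this symmetry: whatever the last successful step was, the corresponding label releases it first.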
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a2108e35c599..33eb0c5b1d32 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2027,7 +2027,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_DW5_LN0_AE		0x162454
 #define _CNL_PORT_TX_DW5_LN0_B		0x162654
 #define _CNL_PORT_TX_DW5_LN0_C		0x162C54
-#define _CNL_PORT_TX_DW5_LN0_D		0x162ED4
+#define _CNL_PORT_TX_DW5_LN0_D		0x162E54
 #define _CNL_PORT_TX_DW5_LN0_F		0x162854
 #define CNL_PORT_TX_DW5_GRP(port)	_MMIO_PORT6(port, \
 					    _CNL_PORT_TX_DW5_GRP_AE, \
@@ -2058,7 +2058,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_DW7_LN0_AE		0x16245C
 #define _CNL_PORT_TX_DW7_LN0_B		0x16265C
 #define _CNL_PORT_TX_DW7_LN0_C		0x162C5C
-#define _CNL_PORT_TX_DW7_LN0_D		0x162EDC
+#define _CNL_PORT_TX_DW7_LN0_D		0x162E5C
 #define _CNL_PORT_TX_DW7_LN0_F		0x16285C
 #define CNL_PORT_TX_DW7_GRP(port)	_MMIO_PORT6(port, \
 					    _CNL_PORT_TX_DW7_GRP_AE, \
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index b33d2158c234..e5e6f6bb2b05 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val;
+	bool boost = false;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
@@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->pcu_lock);
-	rps->boost_freq = val;
+	if (val != rps->boost_freq) {
+		rps->boost_freq = val;
+		boost = atomic_read(&rps->num_waiters);
+	}
 	mutex_unlock(&dev_priv->pcu_lock);
+	if (boost)
+		schedule_work(&rps->work);
 
 	return count;
 }
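
Note: the sysfs change decides under the lock whether work is needed, but kicks the worker only after dropping the lock. A userspace analogue of that pattern, with hypothetical names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int boost_freq;
static atomic_int num_waiters;

static void schedule_boost_work(void) { puts("boost worker kicked"); }

static void store_boost_freq(unsigned int val)
{
	bool boost = false;

	pthread_mutex_lock(&lock);
	if (val != boost_freq) {		/* only act on a real change */
		boost_freq = val;
		boost = atomic_load(&num_waiters) != 0;
	}
	pthread_mutex_unlock(&lock);

	if (boost)				/* outside the lock */
		schedule_boost_work();
}

int main(void)
{
	atomic_store(&num_waiters, 1);
	store_boost_freq(450);
	return 0;
}

Deferring schedule_work() until after the unlock keeps the worker from ever contending with the writer on the same lock.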
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 522d54fecb53..4a01f62a392d 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
 {
 	struct intel_encoder *encoder;
 
-	if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
-		return NULL;
-
 	/* MST */
 	if (pipe >= 0) {
+		if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+			return NULL;
+
 		encoder = dev_priv->av_enc_map[pipe];
 		/*
 		 * when bootup, audio driver may not know it is
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 35c5299feab6..a29868cd30c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -620,19 +620,15 @@ static int
 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+	int backlight_controller = dev_priv->vbt.backlight.controller;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	/* We should never land here with regular DP ports */
 	WARN_ON(!intel_dp_is_edp(intel_dp));
 
-	/*
-	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
-	 * mapping needs to be retrieved from VBT, for now just hard-code to
-	 * use instance #0 always.
-	 */
 	if (!intel_dp->pps_reset)
-		return 0;
+		return backlight_controller;
 
 	intel_dp->pps_reset = false;
 
@@ -642,7 +638,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 	 */
 	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
 
-	return 0;
+	return backlight_controller;
 }
 
 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7ece2f061b9e..e0fca035ff78 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -719,6 +719,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	unsigned long flags;
 
+	GEM_TRACE("%s\n", engine->name);
+
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
@@ -765,6 +767,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	 */
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
+	/* Mark all CS interrupts as complete */
+	execlists->active = 0;
+
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 380f340204e8..debbbf0fd4bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val;
 
@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val = (bd->props.brightness * div) / 100;
 
@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector)
 		return -ENODEV;
 	}
 
-	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
 		return 0;
 
 	if (drm->client.device.info.chipset <= 0xa0 ||
@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
 	struct nvif_device *device = &drm->client.device;
 	struct drm_connector *connector;
 
+	INIT_LIST_HEAD(&drm->bl_connectors);
+
 	if (apple_gmux_present()) {
 		NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
 		return 0;
 	}
 
-	INIT_LIST_HEAD(&drm->bl_connectors);
-
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
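
Note: the nouveau_backlight hunks replace a cached output index with one derived from the DCB "or" bitmask. ffs() returns the 1-based position of the lowest set bit (0 if none), so ffs(mask) - 1 is the index of the first output the mask selects. A standalone sketch:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int dcb_or = 0x4;		/* bit 2 set -> OR index 2 */
	int or = ffs(dcb_or) - 1;

	printf("or mask 0x%x -> index %d\n", dcb_or, or);	/* prints 2 */
	return 0;
}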
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index dd8d4352ed99..caddce88d2d8 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4477,6 +4477,7 @@ nv50_display_create(struct drm_device *dev)
 	nouveau_display(dev)->fini = nv50_display_fini;
 	disp->disp = &nouveau_display(dev)->disp;
 	dev->mode_config.funcs = &nv50_disp_func;
+	dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
 	if (nouveau_atomic)
 		dev->driver->driver_features |= DRIVER_ATOMIC;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 93946dcee319..1c12e58f44c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
 
 	tail = this->addr + this->size;
 	if (vmm->func->page_block && next && next->page != p)
-		tail = ALIGN_DOWN(addr, vmm->func->page_block);
+		tail = ALIGN_DOWN(tail, vmm->func->page_block);
 
 	if (addr <= tail && tail - addr >= size) {
 		rb_erase(&this->tree, &vmm->free);
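
Note: the vmm fix aligns the *tail* of the candidate region down to the page_block boundary; aligning the search address instead could produce a tail below addr and a bogus size check. A hypothetical standalone re-implementation of the power-of-two alignment:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))	/* a must be a power of two */

int main(void)
{
	uint64_t addr = 0x12000, size = 0x1000;
	uint64_t tail = 0x15800, page_block = 0x4000;

	tail = ALIGN_DOWN(tail, page_block);	/* -> 0x14000 */
	printf("tail=0x%llx fits=%d\n",
	       (unsigned long long)tail,
	       addr <= tail && tail - addr >= size);
	return 0;
}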
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index d3045a371a55..7c73bc7e2f85 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3221,35 +3221,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	case CHIP_KAVERI:
 		rdev->config.cik.max_shader_engines = 1;
 		rdev->config.cik.max_tile_pipes = 4;
-		if ((rdev->pdev->device == 0x1304) ||
-		    (rdev->pdev->device == 0x1305) ||
-		    (rdev->pdev->device == 0x130C) ||
-		    (rdev->pdev->device == 0x130F) ||
-		    (rdev->pdev->device == 0x1310) ||
-		    (rdev->pdev->device == 0x1311) ||
-		    (rdev->pdev->device == 0x131C)) {
-			rdev->config.cik.max_cu_per_sh = 8;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1309) ||
-			   (rdev->pdev->device == 0x130A) ||
-			   (rdev->pdev->device == 0x130D) ||
-			   (rdev->pdev->device == 0x1313) ||
-			   (rdev->pdev->device == 0x131D)) {
-			rdev->config.cik.max_cu_per_sh = 6;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1306) ||
-			   (rdev->pdev->device == 0x1307) ||
-			   (rdev->pdev->device == 0x130B) ||
-			   (rdev->pdev->device == 0x130E) ||
-			   (rdev->pdev->device == 0x1315) ||
-			   (rdev->pdev->device == 0x1318) ||
-			   (rdev->pdev->device == 0x131B)) {
-			rdev->config.cik.max_cu_per_sh = 4;
-			rdev->config.cik.max_backends_per_se = 1;
-		} else {
-			rdev->config.cik.max_cu_per_sh = 3;
-			rdev->config.cik.max_backends_per_se = 1;
-		}
+		rdev->config.cik.max_cu_per_sh = 8;
+		rdev->config.cik.max_backends_per_se = 2;
 		rdev->config.cik.max_sh_per_se = 1;
 		rdev->config.cik.max_texture_channel_caches = 4;
 		rdev->config.cik.max_gprs = 256;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 8d3e3d2e0090..7828a5e10629 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1365,6 +1365,10 @@ int radeon_device_init(struct radeon_device *rdev,
 	if ((rdev->flags & RADEON_IS_PCI) &&
 	    (rdev->family <= CHIP_RS740))
 		rdev->need_dma32 = true;
+#ifdef CONFIG_PPC64
+	if (rdev->family == CHIP_CEDAR)
+		rdev->need_dma32 = true;
+#endif
 
 	dma_bits = rdev->need_dma32 ? 32 : 40;
 	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a9962ffba720..27d8e7dd2d06 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		radeon_mn_unregister(robj);
 		radeon_bo_unref(&robj);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 15404af9d740..31f5ad605e59 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
 	WARN_ON_ONCE(!list_empty(&bo->va));
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 326ad068c15a..4b6542538ff9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
 static void radeon_pm_update_profile(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
 
 int radeon_pm_get_type_index(struct radeon_device *rdev,
 			     enum radeon_pm_state_type ps_type,
@@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
 			radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
 		}
 		mutex_unlock(&rdev->pm.mutex);
-		/* allow new DPM state to be picked */
-		radeon_pm_compute_clocks_dpm(rdev);
 	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 		if (rdev->pm.profile == PM_PROFILE_AUTO) {
 			mutex_lock(&rdev->pm.mutex);
@@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
 		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
 	/* balanced states don't exist at the moment */
 	if (dpm_state == POWER_STATE_TYPE_BALANCED)
-		dpm_state = rdev->pm.dpm.ac_power ?
-			POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
 
 restart_search:
 	/* Pick the best power state based on current conditions */
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 5decae0069d0..78cbc3145e44 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -93,6 +93,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	DRM_DEBUG_DRIVER("Disabling the CRTC\n");
 
+	drm_crtc_vblank_off(crtc);
+
 	sun4i_tcon_set_status(scrtc->tcon, encoder, false);
 
 	if (crtc->state->event && !crtc->state->active) {
@@ -113,6 +115,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
 	DRM_DEBUG_DRIVER("Enabling the CRTC\n");
 
 	sun4i_tcon_set_status(scrtc->tcon, encoder, true);
+
+	drm_crtc_vblank_on(crtc);
 }
 
 static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index 023f39bda633..e36004fbe453 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -132,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)
 static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
 {
 	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	u32 val = degrees / 120;
+
+	val <<= 28;
 
 	regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
 			   GENMASK(29, 28),
-			   degrees / 120);
+			   val);
 
 	return 0;
 }
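
Note: update-bits style helpers mask-and-OR the value as given, so the caller must shift the field value to its bit position first; writing 1 instead of 1 << 28 lands in bit 0 and is silently masked away by GENMASK(29, 28). A standalone re-implementation (not the regmap API) of the idea:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);	/* val must be pre-shifted */
}

int main(void)
{
	int degrees = 240;
	uint32_t val = (uint32_t)(degrees / 120) << 28;	/* phase field at bits 29:28 */
	uint32_t reg = update_bits(0, GENMASK(29, 28), val);

	printf("reg = 0x%08x\n", reg);	/* 0x20000000 */
	return 0;
}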
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 832f8f9bc47f..b8da5a50a61d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -92,6 +92,8 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
 
 	DRM_DEBUG_DRIVER("Vertical parameters OK\n");
 
+	tcon->dclk_min_div = 6;
+	tcon->dclk_max_div = 127;
 	rounded_rate = clk_round_rate(tcon->dclk, rate);
 	if (rounded_rate < rate)
 		return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 3c15cf24b503..2de586b7c98b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -101,10 +101,12 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
 		return;
 	}
 
-	if (enabled)
+	if (enabled) {
 		clk_prepare_enable(clk);
-	else
+	} else {
+		clk_rate_exclusive_put(clk);
 		clk_disable_unprepare(clk);
+	}
 }
 
 static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
@@ -260,7 +262,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
 				 const struct drm_display_mode *mode)
 {
 	/* Configure the dot clock */
-	clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+	clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000);
 
 	/* Set the resolution */
 	regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
@@ -335,6 +337,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
 	regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
 			   SUN4I_TCON_GCTL_IOMAP_MASK,
 			   SUN4I_TCON_GCTL_IOMAP_TCON0);
+
+	/* Enable the output on the pins */
+	regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000);
 }
 
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
@@ -418,7 +423,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
 	WARN_ON(!tcon->quirks->has_channel_1);
 
 	/* Configure the dot clock */
-	clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
+	clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000);
 
 	/* Adjust clock delay */
 	clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
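
Note: clk_set_rate_exclusive() takes a rate-protection reference that other consumers must respect, so the disable path above has to balance it with clk_rate_exclusive_put() or the rate stays locked forever. A userspace model of that pairing (all names hypothetical, not the CCF implementation):

#include <stdio.h>

struct clk { unsigned long rate; int protect_count; int enable_count; };

static int clk_set_rate_exclusive(struct clk *c, unsigned long rate)
{
	c->rate = rate;
	c->protect_count++;	/* nobody else may change the rate now */
	return 0;
}

static void clk_rate_exclusive_put(struct clk *c)
{
	if (c->protect_count > 0)
		c->protect_count--;
}

static void channel_set_status(struct clk *c, int enabled)
{
	if (enabled) {
		c->enable_count++;
	} else {
		clk_rate_exclusive_put(c);	/* balance set_rate_exclusive */
		c->enable_count--;
	}
}

int main(void)
{
	struct clk dclk = { 0, 0, 0 };

	clk_set_rate_exclusive(&dclk, 154000000);
	channel_set_status(&dclk, 1);
	channel_set_status(&dclk, 0);
	printf("protect_count=%d\n", dclk.protect_count);	/* back to 0 */
	return 0;
}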
@@ -870,52 +875,56 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
-	/*
-	 * This can only be made optional since we've had DT nodes
-	 * without the LVDS reset properties.
-	 *
-	 * If the property is missing, just disable LVDS, and print a
-	 * warning.
-	 */
-	tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
-	if (IS_ERR(tcon->lvds_rst)) {
-		dev_err(dev, "Couldn't get our reset line\n");
-		return PTR_ERR(tcon->lvds_rst);
-	} else if (tcon->lvds_rst) {
-		has_lvds_rst = true;
-		reset_control_reset(tcon->lvds_rst);
-	} else {
-		has_lvds_rst = false;
-	}
-
-	/*
-	 * This can only be made optional since we've had DT nodes
-	 * without the LVDS reset properties.
-	 *
-	 * If the property is missing, just disable LVDS, and print a
-	 * warning.
-	 */
-	if (tcon->quirks->has_lvds_alt) {
-		tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
-		if (IS_ERR(tcon->lvds_pll)) {
-			if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
-				has_lvds_alt = false;
-			} else {
-				dev_err(dev, "Couldn't get the LVDS PLL\n");
-				return PTR_ERR(tcon->lvds_pll);
-			}
-		} else {
-			has_lvds_alt = true;
-		}
-	}
-
-	if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
-		dev_warn(dev,
-			 "Missing LVDS properties, Please upgrade your DT\n");
-		dev_warn(dev, "LVDS output disabled\n");
-		can_lvds = false;
+	if (tcon->quirks->supports_lvds) {
+		/*
+		 * This can only be made optional since we've had DT
+		 * nodes without the LVDS reset properties.
+		 *
+		 * If the property is missing, just disable LVDS, and
+		 * print a warning.
+		 */
+		tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
+		if (IS_ERR(tcon->lvds_rst)) {
+			dev_err(dev, "Couldn't get our reset line\n");
+			return PTR_ERR(tcon->lvds_rst);
+		} else if (tcon->lvds_rst) {
+			has_lvds_rst = true;
+			reset_control_reset(tcon->lvds_rst);
+		} else {
+			has_lvds_rst = false;
+		}
+
+		/*
+		 * This can only be made optional since we've had DT
+		 * nodes without the LVDS reset properties.
+		 *
+		 * If the property is missing, just disable LVDS, and
+		 * print a warning.
+		 */
+		if (tcon->quirks->has_lvds_alt) {
+			tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
+			if (IS_ERR(tcon->lvds_pll)) {
+				if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
+					has_lvds_alt = false;
+				} else {
+					dev_err(dev, "Couldn't get the LVDS PLL\n");
+					return PTR_ERR(tcon->lvds_pll);
+				}
+			} else {
+				has_lvds_alt = true;
+			}
+		}
+
+		if (!has_lvds_rst ||
+		    (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
+			dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n");
+			dev_warn(dev, "LVDS output disabled\n");
+			can_lvds = false;
+		} else {
+			can_lvds = true;
+		}
 	} else {
-		can_lvds = true;
+		can_lvds = false;
 	}
 
 	ret = sun4i_tcon_init_clocks(dev, tcon);
@@ -1134,7 +1143,7 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
-	/* nothing is supported */
+	.supports_lvds		= true,
 };
 
 static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index b761c7b823c5..278700c7bf9f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -175,6 +175,7 @@ struct sun4i_tcon_quirks {
 	bool	has_channel_1;	/* a33 does not have channel 1 */
 	bool	has_lvds_alt;	/* Does the LVDS clock have a parent other than the TCON clock? */
 	bool	needs_de_be_mux; /* sun6i needs mux to select backend */
+	bool	supports_lvds;   /* Does the TCON support an LVDS output? */
 
 	/* callback to handle tcon muxing options */
 	int	(*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5720a0d4ac0a..677ac16c8a6d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -197,6 +197,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 	case VIRTGPU_PARAM_3D_FEATURES:
 		value = vgdev->has_virgl_3d == true ? 1 : 0;
 		break;
+	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
+		value = 1;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -472,7 +475,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_get_caps *args = data;
-	int size;
+	unsigned size, host_caps_size;
 	int i;
 	int found_valid = -1;
 	int ret;
@@ -481,6 +484,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 	if (vgdev->num_capsets == 0)
 		return -ENOSYS;
 
+	/* don't allow userspace to pass 0 */
+	if (args->size == 0)
+		return -EINVAL;
+
 	spin_lock(&vgdev->display_info_lock);
 	for (i = 0; i < vgdev->num_capsets; i++) {
 		if (vgdev->capsets[i].id == args->cap_set_id) {
@@ -496,11 +503,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 		return -EINVAL;
 	}
 
-	size = vgdev->capsets[found_valid].max_size;
-	if (args->size > size) {
-		spin_unlock(&vgdev->display_info_lock);
-		return -EINVAL;
-	}
+	host_caps_size = vgdev->capsets[found_valid].max_size;
+	/* only copy to user the minimum of the host caps size or the guest caps size */
+	size = min(args->size, host_caps_size);
 
 	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 		if (cache_ent->id == args->cap_set_id &&
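
Note: instead of rejecting a caller whose buffer is larger than the host capset, the fix copies min(guest_size, host_size) bytes so old and new userspace both work; rejecting size == 0 up front keeps the copy well-defined. A hypothetical standalone sketch of the clamping idea:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define min(a, b)	((a) < (b) ? (a) : (b))

static int get_caps(const char *host_caps, size_t host_caps_size,
		    char *dst, size_t guest_size)
{
	size_t size;

	if (guest_size == 0)		/* don't allow userspace to pass 0 */
		return -1;

	size = min(guest_size, host_caps_size);
	memcpy(dst, host_caps, size);	/* never more than either side owns */
	return 0;
}

int main(void)
{
	char host[8] = "CAPSET!", buf[16] = { 0 };

	if (get_caps(host, sizeof(host), buf, sizeof(buf)) == 0)
		printf("copied: %s\n", buf);
	return 0;
}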
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 1d8775799056..d9607905dc2f 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -233,6 +233,7 @@ static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
 		return -EOPNOTSUPP;
 
 	case STAT_TXDATA_NAK:
+	case STAT_BUS_ERROR:
 		return -EIO;
 	case STAT_TXADDR_NAK:
 	case STAT_RXADDR_NAK:
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index a7ef19855bb8..9bb9f64fdda0 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -43,7 +43,7 @@
 #define TWSI_CTL_AAK		0x04	/* Assert ACK */
 
 /* Status values */
-#define STAT_ERROR		0x00
+#define STAT_BUS_ERROR		0x00
 #define STAT_START		0x08
 #define STAT_REP_START		0x10
 #define STAT_TXADDR_ACK		0x18
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 17fd55af4d92..caa20eb5f26b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -928,7 +928,7 @@ static int exact_lock(dev_t dev, void *data)
 {
 	struct gendisk *p = data;
 
-	if (!get_disk(p))
+	if (!get_disk_and_module(p))
 		return -1;
 	return 0;
 }
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5b4cf030c11..9183d148d644 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
 		dst_release(dst);
 	}
 
-	if (ndev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip(dst_in, addr);
-		/*
-		 * Put the loopback device and get the translated
-		 * device instead.
-		 */
-		dev_put(ndev);
-		ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
-	} else {
-		addr->bound_dev_if = ndev->ifindex;
+	if (ndev) {
+		if (ndev->flags & IFF_LOOPBACK)
+			ret = rdma_translate_ip(dst_in, addr);
+		else
+			addr->bound_dev_if = ndev->ifindex;
+		dev_put(ndev);
 	}
-	dev_put(ndev);
 
 	return ret;
 }
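
Note: the addr.c rework guards against a NULL netdev and drops the device reference exactly once; the old loopback path did a dev_put() inside the branch and then another after it. A hypothetical refcounting model of the fixed shape:

#include <assert.h>
#include <stdio.h>

struct netdev { int refcount; int loopback; };

static void dev_put(struct netdev *d) { assert(--d->refcount >= 0); }

static void resolve(struct netdev *ndev)
{
	if (ndev) {
		if (ndev->loopback)
			puts("translate via loopback");
		else
			puts("bind to device index");
		dev_put(ndev);		/* one put, on every branch */
	}
}

int main(void)
{
	struct netdev lo = { .refcount = 1, .loopback = 1 };

	resolve(&lo);
	printf("refcount=%d\n", lo.refcount);	/* 0, not -1 */
	return 0;
}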
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index bc79ca8215d7..af5ad6a56ae4 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -17,6 +17,7 @@
 
 /* # of WCs to poll for with a single call to ib_poll_cq */
 #define IB_POLL_BATCH			16
+#define IB_POLL_BATCH_DIRECT		8
 
 /* # of WCs to iterate over before yielding */
 #define IB_POLL_BUDGET_IRQ		256
@@ -25,18 +26,18 @@
 #define IB_POLL_FLAGS \
 	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 
-static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
+			   int batch)
 {
 	int i, n, completed = 0;
-	struct ib_wc *wcs = poll_wc ? : cq->wc;
 
 	/*
 	 * budget might be (-1) if the caller does not
 	 * want to bound this call, thus we need unsigned
 	 * minimum here.
 	 */
-	while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
-			budget - completed), wcs)) > 0) {
+	while ((n = ib_poll_cq(cq, min_t(u32, batch,
+			budget - completed), wcs)) > 0) {
 		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = &wcs[i];
 
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
 
 		completed += n;
 
-		if (n != IB_POLL_BATCH ||
-		    (budget != -1 && completed >= budget))
+		if (n != batch || (budget != -1 && completed >= budget))
 			break;
 	}
 
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
  */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {
-	struct ib_wc wcs[IB_POLL_BATCH];
+	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
 
-	return __ib_process_cq(cq, budget, wcs);
+	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
 }
 EXPORT_SYMBOL(ib_process_cq_direct);
 
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
 	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
 	int completed;
 
-	completed = __ib_process_cq(cq, budget, NULL);
+	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
 	if (completed < budget) {
 		irq_poll_complete(&cq->iop);
 		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
 	struct ib_cq *cq = container_of(work, struct ib_cq, work);
 	int completed;
 
-	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
+	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
+				    IB_POLL_BATCH);
 	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
 	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
 		queue_work(ib_comp_wq, &cq->work);
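
Note: the cq.c change lets each caller pass its own array and batch size, so the direct path can use a small 8-entry stack array while IRQ/workqueue paths keep the preallocated 16-entry buffer. A standalone sketch of the batching loop with a fake poll source (hypothetical names):

#include <stdio.h>

#define min_u32(a, b)	((unsigned)(a) < (unsigned)(b) ? (unsigned)(a) : (unsigned)(b))

static int pending = 20;			/* fake completion queue depth */

static int poll_cq(int *wcs, int want)		/* returns # of entries filled */
{
	int n = want < pending ? want : pending;

	pending -= n;
	for (int i = 0; i < n; i++)
		wcs[i] = i;
	return n;
}

static int process_cq(int budget, int *wcs, int batch)
{
	int n, completed = 0;

	/* budget == -1 means unbounded: the unsigned min makes it huge */
	while ((n = poll_cq(wcs, min_u32(batch, budget - completed))) > 0) {
		completed += n;
		if (n != batch || (budget != -1 && completed >= budget))
			break;		/* short batch: the CQ is drained */
	}
	return completed;
}

int main(void)
{
	int wcs[8];				/* small on-stack batch */

	printf("completed %d\n", process_cq(-1, wcs, 8));
	return 0;
}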
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e8010e73a1cf..bb065c9449be 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,
 	ret = device->query_device(device, &device->attrs, &uhw);
 	if (ret) {
 		pr_warn("Couldn't query the device attributes\n");
-		goto cache_cleanup;
+		goto cg_cleanup;
 	}
 
 	ret = ib_device_register_sysfs(device, port_callback);
 	if (ret) {
 		pr_warn("Couldn't register device %s with driver model\n",
 			device->name);
-		goto cache_cleanup;
+		goto cg_cleanup;
 	}
 
 	device->reg_state = IB_DEV_REGISTERED;
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,
 	mutex_unlock(&device_mutex);
 	return 0;
 
+cg_cleanup:
+	ib_device_unregister_rdmacg(device);
 cache_cleanup:
 	ib_cache_cleanup_one(device);
 	ib_cache_release_one(device);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8cf15d4a8ac4..9f029a1ca5ea 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
 
 	resolved_dev = dev_get_by_index(dev_addr.net,
 					dev_addr.bound_dev_if);
-	if (resolved_dev->flags & IFF_LOOPBACK) {
-		dev_put(resolved_dev);
-		resolved_dev = idev;
-		dev_hold(resolved_dev);
+	if (!resolved_dev) {
+		dev_put(idev);
+		return -ENODEV;
 	}
 	ndev = ib_get_ndev_from_path(rec);
 	rcu_read_lock();
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index f015f1bf88c9..3a9d0f5b5881 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1149,6 +1149,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	if (cmd.qp_state > IB_QPS_ERR)
+		return -EINVAL;
+
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1294,6 +1297,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
+	if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
+		return -EINVAL;
+
 	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
 			     cmd.optlen);
 	if (IS_ERR(optval)) {
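
Note: both ucma checks follow the same rule — validate user-controlled values against a hard bound before using them to drive state transitions or size allocations. A userspace analogue with a hypothetical limit standing in for KMALLOC_MAX_SIZE:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define OPT_MAX_SIZE	(4u << 20)	/* stand-in for KMALLOC_MAX_SIZE */

static void *memdup_user(const void *src, size_t len)
{
	void *p;

	if (len > OPT_MAX_SIZE)		/* reject before allocating */
		return NULL;
	p = malloc(len);
	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	char opt[16] = "vendor-option";
	char *copy = memdup_user(opt, sizeof(opt));

	if (!copy)
		return EINVAL;
	puts(copy);
	free(copy);
	return 0;
}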
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 643174d949a8..0dd75f449872 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
 	return 0;
 }
 
-static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
 	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
 {
 	unsigned long flags;
@@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
 	return flags;
 }
 
-static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
-			       unsigned long flags)
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+			unsigned long flags)
 	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
 {
 	if (qp->rcq != qp->scq)
@@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	int status;
 	union ib_gid sgid;
 	struct ib_gid_attr sgid_attr;
+	unsigned int flags;
 	u8 nw_type;
 
 	qp->qplib_qp.modify_flags = 0;
@@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 			dev_dbg(rdev_to_dev(rdev),
 				"Move QP = %p to flush list\n",
 				qp);
+			flags = bnxt_re_lock_cqs(qp);
 			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+			bnxt_re_unlock_cqs(qp, flags);
 		}
 		if (!qp->sumem &&
 		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
 			dev_dbg(rdev_to_dev(rdev),
 				"Move QP = %p out of flush list\n",
 				qp);
+			flags = bnxt_re_lock_cqs(qp);
 			bnxt_qplib_clean_qp(&qp->qplib_qp);
+			bnxt_re_unlock_cqs(qp, flags);
 		}
 	}
 	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
@@ -2227,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
 	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
 	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
 
+	/* Need unconditional fence for local invalidate
+	 * opcode to work as expected.
+	 */
+	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
 	if (wr->send_flags & IB_SEND_SIGNALED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
-	if (wr->send_flags & IB_SEND_FENCE)
-		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
 	if (wr->send_flags & IB_SEND_SOLICITED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
 
@@ -2251,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
 	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
 	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
 
-	if (wr->wr.send_flags & IB_SEND_FENCE)
-		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+	/* Need unconditional fence for reg_mr
+	 * opcode to function as expected.
+	 */
+
+	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+
 	if (wr->wr.send_flags & IB_SEND_SIGNALED)
 		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index b88a48d43a9d..e62b7c2c7da6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
 					   struct ib_udata *udata);
 int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
 #endif /* __BNXT_RE_IB_VERBS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 33a448036c2e..f6e361750466 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
 					  struct bnxt_re_qp *qp)
 {
 	struct ib_event event;
+	unsigned int flags;
+
+	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+		flags = bnxt_re_lock_cqs(qp);
+		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+		bnxt_re_unlock_cqs(qp, flags);
+	}
 
 	memset(&event, 0, sizeof(event));
 	if (qp->qplib_qp.srq) {
@@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)
 	switch (re_work->event) {
 	case NETDEV_REGISTER:
 		rc = bnxt_re_ib_reg(rdev);
-		if (rc)
+		if (rc) {
 			dev_err(rdev_to_dev(rdev),
 				"Failed to register with IB: %#x", rc);
+			bnxt_re_remove_one(rdev);
+			bnxt_re_dev_unreg(rdev);
+		}
 		break;
 	case NETDEV_UP:
 		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 3ea5b9624f6b..06b42c880fd4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 	}
 }
 
-void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
-				 unsigned long *flags)
-	__acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
+static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
+					      unsigned long *flags)
+	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
 {
-	spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
 	if (qp->scq == qp->rcq)
-		__acquire(&qp->rcq->hwq.lock);
+		__acquire(&qp->rcq->flush_lock);
 	else
-		spin_lock(&qp->rcq->hwq.lock);
+		spin_lock(&qp->rcq->flush_lock);
 }
 
-void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
-				 unsigned long *flags)
-	__releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
+					      unsigned long *flags)
+	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
 {
 	if (qp->scq == qp->rcq)
-		__release(&qp->rcq->hwq.lock);
+		__release(&qp->rcq->flush_lock);
 	else
-		spin_unlock(&qp->rcq->hwq.lock);
-	spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
+		spin_unlock(&qp->rcq->flush_lock);
+	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
 }
-
-static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
-						      struct bnxt_qplib_cq *cq)
-{
-	struct bnxt_qplib_cq *buddy_cq = NULL;
-
-	if (qp->scq == qp->rcq)
-		buddy_cq = NULL;
-	else if (qp->scq == cq)
-		buddy_cq = qp->rcq;
-	else
-		buddy_cq = qp->scq;
-	return buddy_cq;
-}
-
-static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
-				     struct bnxt_qplib_cq *cq)
-	__acquires(&buddy_cq->hwq.lock)
-{
-	struct bnxt_qplib_cq *buddy_cq = NULL;
-
-	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-	if (!buddy_cq)
-		__acquire(&cq->hwq.lock);
-	else
-		spin_lock(&buddy_cq->hwq.lock);
-}
-
-static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
-				       struct bnxt_qplib_cq *cq)
-	__releases(&buddy_cq->hwq.lock)
-{
-	struct bnxt_qplib_cq *buddy_cq = NULL;
-
-	buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
-	if (!buddy_cq)
-		__release(&cq->hwq.lock);
-	else
-		spin_unlock(&buddy_cq->hwq.lock);
-}
 
 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 {
 	unsigned long flags;
 
-	bnxt_qplib_acquire_cq_locks(qp, &flags);
+	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 	__bnxt_qplib_add_flush_qp(qp);
-	bnxt_qplib_release_cq_locks(qp, &flags);
+	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }
 
 static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
@@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 {
 	unsigned long flags;
 
-	bnxt_qplib_acquire_cq_locks(qp, &flags);
+	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 	__clean_cq(qp->scq, (u64)(unsigned long)qp);
 	qp->sq.hwq.prod = 0;
 	qp->sq.hwq.cons = 0;
@@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 	qp->rq.hwq.cons = 0;
 
 	__bnxt_qplib_del_flush_qp(qp);
-	bnxt_qplib_release_cq_locks(qp, &flags);
+	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 }
 
 static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
@@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
 	/* Must block new posting of SQ and RQ */
 	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 	bnxt_qplib_cancel_phantom_processing(qp);
-
-	/* Add qp to flush list of the CQ */
-	__bnxt_qplib_add_flush_qp(qp);
 }
 
 /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
@@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
 				sw_sq_cons, cqe->wr_id, cqe->status);
 			cqe++;
 			(*budget)--;
-			bnxt_qplib_lock_buddy_cq(qp, cq);
 			bnxt_qplib_mark_qp_error(qp);
-			bnxt_qplib_unlock_buddy_cq(qp, cq);
+			/* Add qp to flush list of the CQ */
+			bnxt_qplib_add_flush_qp(qp);
 		} else {
 			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
 				/* Before we complete, do WA 9060 */
@@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
 	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
 		qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 		/* Add qp to flush list of the CQ */
-		bnxt_qplib_lock_buddy_cq(qp, cq);
-		__bnxt_qplib_add_flush_qp(qp);
-		bnxt_qplib_unlock_buddy_cq(qp, cq);
+		bnxt_qplib_add_flush_qp(qp);
 	}
 	}
 
@@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
 	if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
 		qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 		/* Add qp to flush list of the CQ */
-		bnxt_qplib_lock_buddy_cq(qp, cq);
-		__bnxt_qplib_add_flush_qp(qp);
-		bnxt_qplib_unlock_buddy_cq(qp, cq);
+		bnxt_qplib_add_flush_qp(qp);
 	}
 	}
 done:
@@ -2501,11 +2454,9 @@ done:
 bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
 {
 	struct cq_base *hw_cqe, **hw_cqe_ptr;
-	unsigned long flags;
 	u32 sw_cons, raw_cons;
 	bool rc = true;
 
-	spin_lock_irqsave(&cq->hwq.lock, flags);
 	raw_cons = cq->hwq.cons;
 	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
 	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
@@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
 
 	/* Check for Valid bit. If the CQE is valid, return false */
 	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
-	spin_unlock_irqrestore(&cq->hwq.lock, flags);
 	return rc;
 }
 
2519 2469
@@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2602 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2552 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2603 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2553 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2604 /* Add qp to flush list of the CQ */ 2554 /* Add qp to flush list of the CQ */
2605 bnxt_qplib_lock_buddy_cq(qp, cq); 2555 bnxt_qplib_add_flush_qp(qp);
2606 __bnxt_qplib_add_flush_qp(qp);
2607 bnxt_qplib_unlock_buddy_cq(qp, cq);
2608 } 2556 }
2609 } 2557 }
2610 2558
@@ -2719,9 +2667,7 @@ do_rq:
2719 */ 2667 */
2720 2668
2721 /* Add qp to flush list of the CQ */ 2669 /* Add qp to flush list of the CQ */
2722 bnxt_qplib_lock_buddy_cq(qp, cq); 2670 bnxt_qplib_add_flush_qp(qp);
2723 __bnxt_qplib_add_flush_qp(qp);
2724 bnxt_qplib_unlock_buddy_cq(qp, cq);
2725done: 2671done:
2726 return rc; 2672 return rc;
2727} 2673}
@@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2750 u32 budget = num_cqes; 2696 u32 budget = num_cqes;
2751 unsigned long flags; 2697 unsigned long flags;
2752 2698
2753 spin_lock_irqsave(&cq->hwq.lock, flags); 2699 spin_lock_irqsave(&cq->flush_lock, flags);
2754 list_for_each_entry(qp, &cq->sqf_head, sq_flush) { 2700 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2755 dev_dbg(&cq->hwq.pdev->dev, 2701 dev_dbg(&cq->hwq.pdev->dev,
2756 "QPLIB: FP: Flushing SQ QP= %p", 2702 "QPLIB: FP: Flushing SQ QP= %p",
@@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2764 qp); 2710 qp);
2765 __flush_rq(&qp->rq, qp, &cqe, &budget); 2711 __flush_rq(&qp->rq, qp, &cqe, &budget);
2766 } 2712 }
2767 spin_unlock_irqrestore(&cq->hwq.lock, flags); 2713 spin_unlock_irqrestore(&cq->flush_lock, flags);
2768 2714
2769 return num_cqes - budget; 2715 return num_cqes - budget;
2770} 2716}
@@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2773 int num_cqes, struct bnxt_qplib_qp **lib_qp) 2719 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2774{ 2720{
2775 struct cq_base *hw_cqe, **hw_cqe_ptr; 2721 struct cq_base *hw_cqe, **hw_cqe_ptr;
2776 unsigned long flags;
2777 u32 sw_cons, raw_cons; 2722 u32 sw_cons, raw_cons;
2778 int budget, rc = 0; 2723 int budget, rc = 0;
2779 2724
2780 spin_lock_irqsave(&cq->hwq.lock, flags);
2781 raw_cons = cq->hwq.cons; 2725 raw_cons = cq->hwq.cons;
2782 budget = num_cqes; 2726 budget = num_cqes;
2783 2727
@@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2853 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ); 2797 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2854 } 2798 }
2855exit: 2799exit:
2856 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2857 return num_cqes - budget; 2800 return num_cqes - budget;
2858} 2801}
2859 2802
2860void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) 2803void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2861{ 2804{
2862 unsigned long flags;
2863
2864 spin_lock_irqsave(&cq->hwq.lock, flags);
2865 if (arm_type) 2805 if (arm_type)
2866 bnxt_qplib_arm_cq(cq, arm_type); 2806 bnxt_qplib_arm_cq(cq, arm_type);
2867 /* Using cq->arm_state variable to track whether to issue cq handler */ 2807 /* Using cq->arm_state variable to track whether to issue cq handler */
2868 atomic_set(&cq->arm_state, 1); 2808 atomic_set(&cq->arm_state, 1);
2869 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2870} 2809}
2871 2810
2872void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) 2811void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ca0a2ffa3509..ade9f13c0fd1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq {
 	struct list_head sqf_head, rqf_head;
 	atomic_t arm_state;
 	spinlock_t compl_lock; /* synch CQ handlers */
+/* Locking Notes:
+ * QP can move to error state from modify_qp, async error event or error
+ * CQE as part of poll_cq. When QP is moved to error state, it gets added
+ * to two flush lists, one each for SQ and RQ.
+ * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
+ * flush_locks should be acquired when QP is moved to error. The control path
+ * operations(modify_qp and async error events) are synchronized with poll_cq
+ * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
+ * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
+ * of the same QP while manipulating the flush list.
+ */
+	spinlock_t flush_lock; /* QP flush management */
 };
 
 #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE	sizeof(struct xrrq_irrq)
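
The locking notes above describe the scheme the qplib_fp.c hunks implement. As a hedged illustration only (the qp->scq back-pointer, the sq.flushed flag and the helper body below are simplified guesses, not the driver's literal code), the flush-list discipline looks roughly like this:

/* Sketch of the flush-list discipline; names follow the patch,
 * the body is an illustrative reduction of the real helper. */
static void example_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	/* Serialize against other poll_cq instances on the same CQ */
	spin_lock_irqsave(&qp->scq->flush_lock, flags);
	if (!qp->sq.flushed) {
		list_add_tail(&qp->sq_flush, &qp->scq->sqf_head);
		qp->sq.flushed = true;
	}
	spin_unlock_irqrestore(&qp->scq->flush_lock, flags);
	/* ... the RQ side takes qp->rcq->flush_lock the same way ... */
}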
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 8329ec6a7946..80027a494730 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 			  err_event->res_err_state_reason);
 		if (!qp)
 			break;
-		bnxt_qplib_acquire_cq_locks(qp, &flags);
 		bnxt_qplib_mark_qp_error(qp);
-		bnxt_qplib_release_cq_locks(qp, &flags);
+		rcfw->aeq_handler(rcfw, qp_event, qp);
 		break;
 	default:
 		/* Command Response */
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 	int rc;
 
 	RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
-
+	/* Supply (log-base-2-of-host-page-size - base-page-shift)
+	 * to bono to adjust the doorbell page sizes.
+	 */
+	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+					   RCFW_DBR_BASE_PAGE_SHIFT);
 	/*
 	 * VFs need not setup the HW context area, PF
 	 * shall setup this area for VF. Skipping the
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 6bee6e3636ea..c7cce2e4185e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -49,6 +49,7 @@
 #define RCFW_COMM_SIZE			0x104
 
 #define RCFW_DBR_PCI_BAR_REGION		2
+#define RCFW_DBR_BASE_PAGE_SHIFT	12
 
 #define RCFW_CMD_PREP(req, CMD, cmd_flags)	\
 	do {					\
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 03057983341f..ee98e5efef84 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_pkey = le32_to_cpu(sb->max_pkeys);
 
 	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
-	attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
+	attr->l2_db_size = (sb->l2_db_space_size + 1) *
+			   (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
 	attr->max_sgid = le32_to_cpu(sb->max_gid);
 
 	bnxt_qplib_query_version(rcfw, attr->fw_ver);
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 2d7ea096a247..3e5a4f760d0e 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {
 	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M		(0x3UL << 4)
 	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M		(0x4UL << 4)
 	#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G		(0x5UL << 4)
-	__le16	reserved16;
+	/* This value is (log-base-2-of-DBR-page-size - 12).
+	 * 0 for 4KB. HW supported values are enumerated below.
+	 */
+	__le16	log2_dbr_pg_size;
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK	0xfUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT		0
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K	0x0UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K	0x1UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K	0x2UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K	0x3UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K	0x4UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K	0x5UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K	0x6UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K	0x7UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M	0x8UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M	0x9UL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M	0xaUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M	0xbUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M	0xcUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M	0xdUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M	0xeUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M	0xfUL
+	#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST	\
+			CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
 	__le64	qpc_page_dir;
 	__le64	mrw_page_dir;
 	__le64	srq_page_dir;
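
The encoding defined above pairs with the qplib_rcfw.c hunk earlier: the value written into log2_dbr_pg_size is simply PAGE_SHIFT - RCFW_DBR_BASE_PAGE_SHIFT. A quick illustrative check of the arithmetic (not driver code):

/* Doorbell page size encoding: (log2(host page size) - 12).
 * 4KB pages:  12 - 12 = 0 -> ..._LOG2_DBR_PG_SIZE_PG_4K
 * 64KB pages: 16 - 12 = 4 -> ..._LOG2_DBR_PG_SIZE_PG_64K */
static inline u16 example_log2_dbr_pg_size(unsigned int page_shift)
{
	return (u16)(page_shift - 12 /* RCFW_DBR_BASE_PAGE_SHIFT */);
}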
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 9a566ee3ceff..82adc0d1d30e 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
 	wc->dlid_path_bits = 0;
 
 	if (is_eth) {
+		wc->slid = 0;
 		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
 		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
 		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
@@ -851,7 +852,6 @@ repoll:
 		}
 	}
 
-	wc->slid = be16_to_cpu(cqe->rlid);
 	g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
 	wc->src_qp = g_mlpath_rqpn & 0xffffff;
 	wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -860,6 +860,7 @@ repoll:
 	wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
 				cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
 	if (is_eth) {
+		wc->slid = 0;
 		wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
 		if (be32_to_cpu(cqe->vlan_my_qpn) &
 		    MLX4_CQE_CVLAN_PRESENT_MASK) {
@@ -871,6 +872,7 @@ repoll:
 		memcpy(wc->smac, cqe->smac, ETH_ALEN);
 		wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
 	} else {
+		wc->slid = be16_to_cpu(cqe->rlid);
 		wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
 		wc->vlan_id = 0xffff;
 	}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2ee9322f2e..5a0e4fc4785a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
 			gid_tbl[i].version = 2;
 			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
 				gid_tbl[i].type = 1;
-			else
-				memset(&gid_tbl[i].gid, 0, 12);
 		}
 	}
 
@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
 		if (!gids) {
 			ret = -ENOMEM;
 		} else {
-			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++)
-				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
+			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
+				memcpy(&gids[i].gid,
+				       &port_gid_table->gids[i].gid,
+				       sizeof(union ib_gid));
+				gids[i].gid_type =
+				    port_gid_table->gids[i].gid_type;
+			}
 		}
 	}
 	spin_unlock_bh(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5b974fb97611..15457c9569a7 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
 		break;
 	}
-	wc->slid = be16_to_cpu(cqe->slid);
 	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
 	wc->dlid_path_bits = cqe->ml_path;
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	}
 
 	if (ll != IB_LINK_LAYER_ETHERNET) {
+		wc->slid = be16_to_cpu(cqe->slid);
 		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
 		return;
 	}
 
+	wc->slid = 0;
 	vlan_present = cqe->l4_l3_hdr_type & 0x1;
 	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
 	if (vlan_present) {
@@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (ucmd.reserved0 || ucmd.reserved1)
 		return -EINVAL;
 
-	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+	/* check multiplication overflow */
+	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+		return -EINVAL;
+
+	umem = ib_umem_get(context, ucmd.buf_addr,
+			   (size_t)ucmd.cqe_size * entries,
 			   IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(umem)) {
 		err = PTR_ERR(umem);
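
The resize_user() hunk guards the entries * cqe_size multiplication before the product reaches ib_umem_get(). The same pattern, restated as a self-contained hedged sketch (names are illustrative; like the patch's test, it assumes entries >= 1, which the resize path validates earlier):

#include <stddef.h>
#include <stdint.h>

/* Reject the request when entries * cqe_size would overflow size_t,
 * mirroring the "SIZE_MAX / cqe_size <= entries - 1" test above. */
static int example_checked_len(size_t entries, size_t cqe_size, size_t *len)
{
	if (cqe_size && SIZE_MAX / cqe_size <= entries - 1)
		return -1;	/* would overflow */
	*len = cqe_size * entries;
	return 0;
}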
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 4236c8086820..033b6af90de9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
 	struct mlx5_ib_multiport_info *mpi;
 	struct mlx5_ib_port *port;
 
+	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+	    ll != IB_LINK_LAYER_ETHERNET) {
+		if (native_port_num)
+			*native_port_num = ib_port_num;
+		return ibdev->mdev;
+	}
+
 	if (native_port_num)
 		*native_port_num = 1;
 
-	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
-		return ibdev->mdev;
-
 	port = &ibdev->port[ib_port_num - 1];
 	if (!port)
 		return NULL;
@@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	struct mlx5_ib_dev *ibdev;
 	struct ib_event ibev;
 	bool fatal = false;
-	u8 port = 0;
+	u8 port = (u8)work->param;
 
 	if (mlx5_core_is_mp_slave(work->dev)) {
 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		port = (u8)work->param;
-
 		/* In RoCE, port up/down events are handled in
 		 * mlx5_netdev_event().
 		 */
@@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 
 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_PKEY_CHANGE:
 		ibev.event = IB_EVENT_PKEY_CHANGE;
-		port = (u8)work->param;
-
 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
 		break;
 
 	case MLX5_DEV_EVENT_GUID_CHANGE:
 		ibev.event = IB_EVENT_GID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_CLIENT_REREG:
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
-		port = (u8)work->param;
 		break;
 	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
 		schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	ibev.device = &ibdev->ib_dev;
 	ibev.element.port_num = port;
 
-	if (port < 1 || port > ibdev->num_ports) {
+	if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
 		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
 		goto out;
 	}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 556e015678de..1961c6a45437 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1816,7 +1816,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 
 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
 	mr->ibmr.length = 0;
-	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
 		if (unlikely(i >= mr->max_descs))
@@ -1828,6 +1827,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 
 		sg_offset = 0;
 	}
+	mr->ndescs = i;
 
 	if (sg_offset_p)
 		*sg_offset_p = sg_offset;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 39d24bf694a8..36197fbac63a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1584,6 +1584,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
 	struct mlx5_ib_create_qp ucmd;
 	struct mlx5_ib_qp_base *base;
+	int mlx5_st;
 	void *qpc;
 	u32 *in;
 	int err;
@@ -1592,6 +1593,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
+	mlx5_st = to_mlx5_st(init_attr->qp_type);
+	if (mlx5_st < 0)
+		return -EINVAL;
+
 	if (init_attr->rwq_ind_tbl) {
 		if (!udata)
 			return -ENOSYS;
@@ -1753,7 +1758,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
-	MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+	MLX5_SET(qpc, qpc, st, mlx5_st);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 
 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -3095,8 +3100,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		goto out;
 
 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-	    !optab[mlx5_cur][mlx5_new])
+	    !optab[mlx5_cur][mlx5_new]) {
+		err = -EINVAL;
 		goto out;
+	}
 
 	op = optab[mlx5_cur][mlx5_new];
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 478b7317b80a..26dc374787f7 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,
 		}
 		return -EINVAL;
 	}
-	neigh = dst_neigh_lookup(dst, &dst_in);
-
+	neigh = dst_neigh_lookup(dst, &fl6.daddr);
 	if (neigh) {
 		rcu_read_lock();
 		if (neigh->nud_state & NUD_VALID) {
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	qp = idr_find(&dev->qpidr, conn_param->qpn);
 
-	laddr = (struct sockaddr_in *)&cm_id->local_addr;
-	raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
-	raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+	DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
+		 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
+		 ntohs(raddr->sin_port));
 
 	DP_DEBUG(dev, QEDR_MSG_IWARP,
 		 "Connect source address: %pISpc, remote address: %pISpc\n",
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	int rc;
 	int i;
 
-	laddr = (struct sockaddr_in *)&cm_id->local_addr;
-	laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
 
 	DP_DEBUG(dev, QEDR_MSG_IWARP,
 		 "Create Listener address: %pISpc\n", &cm_id->local_addr);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 53f00dbf313f..875b17272d65 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 	switch (wr->opcode) {
 	case IB_WR_SEND_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
 		swqe = (struct rdma_sq_send_wqe_1st *)wqe;
 		swqe->wqe_size = 2;
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
+		if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
+			rc = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
 		wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
 		rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
 
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
-	union rdma_cqe *cqe = cq->latest_cqe;
+	union rdma_cqe *cqe;
 	u32 old_cons, new_cons;
 	unsigned long flags;
 	int update = 0;
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 		return qedr_gsi_poll_cq(ibcq, num_entries, wc);
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
+	cqe = cq->latest_cqe;
 	old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
 	while (num_entries && is_valid_cqe(cq, cqe)) {
 		struct qedr_qp *qp;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 1b2e5362a3ff..cc429b567d0a 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
 	unsigned long timeout;
 	struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
 
-	if (percpu_ref_is_zero(&mr->refcount))
-		return 0;
-	/* avoid dma mr */
-	if (mr->lkey)
+	if (mr->lkey) {
+		/* avoid dma mr */
 		rvt_dereg_clean_qps(mr);
+		/* @mr was indexed on rcu protected @lkey_table */
+		synchronize_rcu();
+	}
+
 	timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
 	if (!timeout) {
 		rvt_pr_err(rdi,
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 1f316d66e6f7..41614c185918 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
 {
 	struct matrix_keypad *keypad = input_get_drvdata(dev);
 
+	spin_lock_irq(&keypad->lock);
 	keypad->stopped = true;
-	mb();
+	spin_unlock_irq(&keypad->lock);
+
 	flush_work(&keypad->work.work);
 	/*
 	 * matrix_keypad_scan() will leave IRQs enabled;
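
The matrix_keypad hunk replaces a bare mb() with the driver's own spinlock, so the interrupt handler observes keypad->stopped under the same lock it already takes. A hedged sketch of both sides of that handshake (simplified; not the driver's full code):

static void example_stop(struct matrix_keypad *keypad)
{
	spin_lock_irq(&keypad->lock);
	keypad->stopped = true;		/* published under the lock */
	spin_unlock_irq(&keypad->lock);

	flush_work(&keypad->work.work);	/* wait out any in-flight scan */
}

static irqreturn_t example_interrupt(int irq, void *id)
{
	struct matrix_keypad *keypad = id;

	spin_lock(&keypad->lock);
	if (keypad->stopped) {		/* raced with stop: nothing to do */
		spin_unlock(&keypad->lock);
		return IRQ_HANDLED;
	}
	/* ... disable row IRQs and schedule the scan work ... */
	spin_unlock(&keypad->lock);
	return IRQ_HANDLED;
}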
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 3d2e23a0ae39..a246fc686bb7 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -173,7 +173,6 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN0046", /* X250 */
 	"LEN004a", /* W541 */
 	"LEN200f", /* T450s */
-	"LEN2018", /* T460p */
 	NULL
 };
 
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index db4f6bb502e3..a5ab774da4cc 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -1,11 +1,8 @@
-/*
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Melfas MMS114/MMS152 touchscreen device driver
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd.
+// Author: Joonyoung Shim <jy0922.shim@samsung.com>
 
 #include <linux/module.h>
 #include <linux/delay.h>
@@ -624,4 +621,4 @@ module_i2c_driver(mms114_driver);
 /* Module information */
 MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
 MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 1d3056f53747..2cbb19cddbf8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1412,7 +1412,7 @@ static struct irq_chip its_irq_chip = {
  * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
  */
 #define IRQS_PER_CHUNK_SHIFT	5
-#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
+#define IRQS_PER_CHUNK		(1UL << IRQS_PER_CHUNK_SHIFT)
 #define ITS_MAX_LPI_NRBITS	16 /* 64K LPIs */
 
 static unsigned long *lpi_bitmap;
@@ -2119,11 +2119,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	/*
-	 * At least one bit of EventID is being used, hence a minimum
-	 * of two entries. No, the architecture doesn't let you
-	 * express an ITT with a single entry.
+	 * We allocate at least one chunk worth of LPIs bet device,
+	 * and thus that many ITEs. The device may require less though.
 	 */
-	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
+	nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
 	sz = nr_ites * its->ite_size;
 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
 	itt = kzalloc(sz, GFP_KERNEL);
@@ -2495,7 +2494,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
 
 static void its_vpe_schedule(struct its_vpe *vpe)
 {
-	void * __iomem vlpi_base = gic_data_rdist_vlpi_base();
+	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
 	u64 val;
 
 	/* Schedule the VPE */
@@ -2527,7 +2526,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 
 static void its_vpe_deschedule(struct its_vpe *vpe)
 {
-	void * __iomem vlpi_base = gic_data_rdist_vlpi_base();
+	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
 	u32 count = 1000000;	/* 1s! */
 	bool clean;
 	u64 val;
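
The switch from (1 << ...) to (1UL << ...) is what lets the its_create_device() hunk compile cleanly: the kernel's max() macro type-checks its operands, and roundup_pow_of_two() returns unsigned long. Illustrative only:

#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1UL << IRQS_PER_CHUNK_SHIFT)	/* unsigned long */

/* nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
 * with the plain int form (1 << ...), max() would see mismatched
 * operand types and trip the macro's type check. */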
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 675eda5ff2b8..4760307ab43f 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -28,20 +28,6 @@ struct gpcv2_irqchip_data {
 
 static struct gpcv2_irqchip_data *imx_gpcv2_instance;
 
-/*
- * Interface for the low level wakeup code.
- */
-u32 imx_gpcv2_get_wakeup_source(u32 **sources)
-{
-	if (!imx_gpcv2_instance)
-		return 0;
-
-	if (sources)
-		*sources = imx_gpcv2_instance->wakeup_sources;
-
-	return IMR_NUM;
-}
-
 static int gpcv2_wakeup_source_save(void)
 {
 	struct gpcv2_irqchip_data *cd;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 1a46b41dac70..6422846b546e 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -659,11 +659,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
 static void search_free(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
-	bio_complete(s);
 
 	if (s->iop.bio)
 		bio_put(s->iop.bio);
 
+	bio_complete(s);
 	closure_debug_destroy(cl);
 	mempool_free(s, s->d->c->search);
 }
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 312895788036..f2273143b3cb 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	uint32_t rtime = cpu_to_le32(get_seconds());
 	struct uuid_entry *u;
 	char buf[BDEVNAME_SIZE];
+	struct cached_dev *exist_dc, *t;
 
 	bdevname(dc->bdev, buf);
 
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 		return -EINVAL;
 	}
 
+	/* Check whether already attached */
+	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
+		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
+			pr_err("Tried to attach %s but duplicate UUID already attached",
+				buf);
+
+			return -EINVAL;
+		}
+	}
+
 	u = uuid_find(c, dc->sb.uuid);
 
 	if (u &&
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
 	return;
 err:
-	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+	pr_notice("error %s: %s", bdevname(bdev, name), err);
 	bcache_device_stop(&dc->disk);
 }
 
@@ -1274,7 +1285,7 @@ static int flash_devs_run(struct cache_set *c)
 	struct uuid_entry *u;
 
 	for (u = c->uuids;
-	     u < c->uuids + c->devices_max_used && !ret;
+	     u < c->uuids + c->nr_uuids && !ret;
 	     u++)
 		if (UUID_FLASH_ONLY(u))
 			ret = flash_dev_run(c, u);
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	const char *err = NULL; /* must be set for any error case */
 	int ret = 0;
 
+	bdevname(bdev, name);
+
 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
 	get_page(sb_page);
 
-	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+	if (blk_queue_discard(bdev_get_queue(bdev)))
 		ca->discard = CACHE_DISCARD(&ca->sb);
 
 	ret = cache_alloc(ca);
 	if (ret != 0) {
+		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 		if (ret == -ENOMEM)
 			err = "cache_alloc(): -ENOMEM";
 		else
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 		goto out;
 	}
 
-	pr_info("registered cache device %s", bdevname(bdev, name));
+	pr_info("registered cache device %s", name);
 
 out:
 	kobject_put(&ca->kobj);
 
 err:
 	if (err)
-		pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+		pr_notice("error %s: %s", name, err);
 
 	return ret;
 }
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 	if (err)
 		goto err_close;
 
+	err = "failed to register device";
 	if (SB_IS_BDEV(sb)) {
 		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
 		if (!dc)
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 			goto err_close;
 
 		if (register_cache(sb, sb_page, bdev, ca) != 0)
-			goto err_close;
+			goto err;
 	}
 out:
 	if (sb_page)
@@ -2041,7 +2056,7 @@ out:
 err_close:
 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
 err:
-	pr_info("error opening %s: %s", path, err);
+	pr_info("error %s: %s", path, err);
 	ret = -EINVAL;
 	goto out;
 }
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 414c9af54ded..aa2032fa80d4 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -386,9 +386,6 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 			       enum data_mode *data_mode)
 {
-	unsigned noio_flag;
-	void *ptr;
-
 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
 		*data_mode = DATA_MODE_SLAB;
 		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 	 * all allocations done by this process (including pagetables) are done
 	 * as if GFP_NOIO was specified.
 	 */
+	if (gfp_mask & __GFP_NORETRY) {
+		unsigned noio_flag = memalloc_noio_save();
+		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 
-	if (gfp_mask & __GFP_NORETRY)
-		noio_flag = memalloc_noio_save();
-
-	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
-
-	if (gfp_mask & __GFP_NORETRY)
 		memalloc_noio_restore(noio_flag);
+		return ptr;
+	}
 
-	return ptr;
+	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
 }
 
 /*
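
The dm-bufio rework above narrows the memalloc_noio_save()/restore() window to the __GFP_NORETRY case only; the common path calls __vmalloc() directly. The resulting pattern, restated as a standalone sketch (function name illustrative, body mirrors the patch):

static void *example_alloc(size_t size, gfp_t gfp_mask)
{
	if (gfp_mask & __GFP_NORETRY) {
		/* Make this allocation behave as GFP_NOIO for its duration */
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(size, gfp_mask, PAGE_KERNEL);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(size, gfp_mask, PAGE_KERNEL);
}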
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7d3e572072f5..a05a560d3cba 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -211,29 +211,27 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 		else
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
 
-	} else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		   m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
-
-		if (m->queue_mode == DM_TYPE_BIO_BASED) {
-			/*
-			 * bio-based doesn't support any direct scsi_dh management;
-			 * it just discovers if a scsi_dh is attached.
-			 */
-			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
-		}
-	}
-
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
-		set_bit(MPATHF_QUEUE_IO, &m->flags);
-		atomic_set(&m->pg_init_in_progress, 0);
-		atomic_set(&m->pg_init_count, 0);
-		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
-		init_waitqueue_head(&m->pg_init_wait);
+		/*
+		 * bio-based doesn't support any direct scsi_dh management;
+		 * it just discovers if a scsi_dh is attached.
+		 */
+		set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 	}
 
 	dm_table_set_type(ti->table, m->queue_mode);
 
+	/*
+	 * Init fields that are only used when a scsi_dh is attached
+	 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
+	 */
+	set_bit(MPATHF_QUEUE_IO, &m->flags);
+	atomic_set(&m->pg_init_in_progress, 0);
+	atomic_set(&m->pg_init_count, 0);
+	m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+	init_waitqueue_head(&m->pg_init_wait);
+
 	return 0;
 }
 
@@ -337,9 +335,6 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
 	m->current_pg = pg;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		return;
-
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (m->hw_handler_name) {
 		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
@@ -385,8 +380,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
-		if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
-			clear_bit(MPATHF_QUEUE_IO, &m->flags);
+		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
 
@@ -599,7 +593,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 	return pgpath;
 }
 
-static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
 {
 	struct pgpath *pgpath;
 	unsigned long flags;
@@ -634,8 +628,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
 {
 	struct pgpath *pgpath;
 
-	if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
-		pgpath = __map_bio_nvme(m, bio);
+	if (!m->hw_handler_name)
+		pgpath = __map_bio_fast(m, bio);
 	else
 		pgpath = __map_bio(m, bio);
 
@@ -675,8 +669,7 @@ static void process_queued_io_list(struct multipath *m)
 {
 	if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
 		dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
-	else if (m->queue_mode == DM_TYPE_BIO_BASED ||
-		 m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	else if (m->queue_mode == DM_TYPE_BIO_BASED)
 		queue_work(kmultipathd, &m->process_queued_bios);
 }
 
@@ -811,15 +804,14 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 	return 0;
 }
 
-static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error)
+static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
+			 const char *attached_handler_name, char **error)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	const char *attached_handler_name;
 	int r;
 
 	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
-		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 		if (attached_handler_name) {
 			/*
 			 * Clear any hw_handler_params associated with a
@@ -873,6 +865,8 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	int r;
 	struct pgpath *p;
 	struct multipath *m = ti->private;
+	struct request_queue *q;
+	const char *attached_handler_name;
 
 	/* we need at least a path arg */
 	if (as->argc < 1) {
@@ -891,9 +885,11 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 		goto bad;
 	}
 
-	if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+	q = bdev_get_queue(p->path.dev->bdev);
+	attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
+	if (attached_handler_name) {
 		INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
-		r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
+		r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
 		if (r) {
 			dm_put_device(ti, p->path.dev);
 			goto bad;
@@ -1001,8 +997,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 	if (!hw_argc)
 		return 0;
 
-	if (m->queue_mode == DM_TYPE_BIO_BASED ||
-	    m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
+	if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		dm_consume_args(as, hw_argc);
 		DMERR("bio-based multipath doesn't allow hardware handler args");
 		return 0;
@@ -1091,8 +1086,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 
 		if (!strcasecmp(queue_mode_name, "bio"))
 			m->queue_mode = DM_TYPE_BIO_BASED;
-		else if (!strcasecmp(queue_mode_name, "nvme"))
-			m->queue_mode = DM_TYPE_NVME_BIO_BASED;
 		else if (!strcasecmp(queue_mode_name, "rq"))
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
 		else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1193,7 +1186,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_discard_bios = 1;
 	ti->num_write_same_bios = 1;
 	ti->num_write_zeroes_bios = 1;
-	if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
 	else
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1730,9 +1723,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 		case DM_TYPE_BIO_BASED:
 			DMEMIT("queue_mode bio ");
 			break;
-		case DM_TYPE_NVME_BIO_BASED:
-			DMEMIT("queue_mode nvme ");
-			break;
 		case DM_TYPE_MQ_REQUEST_BASED:
 			DMEMIT("queue_mode mq ");
 			break;
@@ -2030,8 +2020,9 @@ static int multipath_busy(struct dm_target *ti)
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 12, 0},
-	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
+	.version = {1, 13, 0},
+	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
+		    DM_TARGET_PASSES_INTEGRITY,
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 7ef469e902c6..c1d1034ff7b7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
 			set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
 	} else {
-		if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
-		    test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-		    test_bit(MD_RECOVERY_RUNNING, &recovery))
+		if (!test_bit(MD_RECOVERY_INTR, &recovery) &&
+		    (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+		     test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+		     test_bit(MD_RECOVERY_RUNNING, &recovery)))
 			r = mddev->curr_resync_completed;
 		else
 			r = mddev->recovery_cp;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5fe7ec356c33..7eb3e2a3c07d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t)
 
 	if (t->type != DM_TYPE_NONE) {
 		/* target already set the table's type */
-		if (t->type == DM_TYPE_BIO_BASED)
-			return 0;
-		else if (t->type == DM_TYPE_NVME_BIO_BASED) {
-			if (!dm_table_does_not_support_partial_completion(t)) {
-				DMERR("nvme bio-based is only possible with devices"
-				      " that don't support partial completion");
-				return -EINVAL;
-			}
-			/* Fallthru, also verify all devices are blk-mq */
+		if (t->type == DM_TYPE_BIO_BASED) {
+			/* possibly upgrade to a variant of bio-based */
+			goto verify_bio_based;
 		}
 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
+		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
 		goto verify_rq_based;
 	}
 
@@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	}
 
 	if (bio_based) {
+verify_bio_based:
 		/* We must use this table as bio-based */
 		t->type = DM_TYPE_BIO_BASED;
 		if (dm_table_supports_dax(t) ||
@@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev
 	char b[BDEVNAME_SIZE];
 
 	/* For now, NVMe devices are the only devices of this class */
-	return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0);
+	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
 }
 
 static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68136806d365..45328d8b2859 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return dm_get_geometry(md, geo);
 }
 
-static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
-				  struct block_device **bdev,
-				  fmode_t *mode)
+static char *_dm_claim_ptr = "I belong to device-mapper";
+
+static int dm_get_bdev_for_ioctl(struct mapped_device *md,
+				 struct block_device **bdev,
+				 fmode_t *mode)
 {
 	struct dm_target *tgt;
 	struct dm_table *map;
@@ -490,6 +492,10 @@ retry:
 		goto out;
 
 	bdgrab(*bdev);
+	r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
+	if (r < 0)
+		goto out;
+
 	dm_put_live_table(md, srcu_idx);
 	return r;
 
@@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 	struct mapped_device *md = bdev->bd_disk->private_data;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 
 	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 out:
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 static int open_table_device(struct table_device *td, dev_t dev,
 			     struct mapped_device *md)
 {
-	static char *_claim_ptr = "I belong to device-mapper";
 	struct block_device *bdev;
 
 	int r;
 
 	BUG_ON(td->dm_dev.bdev);
 
-	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
+	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
@@ -3011,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3021,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3032,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3042,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3054,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3064,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
@@ -3075,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
 	fmode_t mode;
 	int r;
 
-	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
 	if (r < 0)
 		return r;
 
@@ -3085,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
 	else
 		r = -EOPNOTSUPP;
 
-	bdput(bdev);
+	blkdev_put(bdev, mode);
 	return r;
 }
 
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index e40065bdbfc8..0a7e99d62c69 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -157,7 +157,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
157 seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 157 seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
158 } 158 }
159 rcu_read_unlock(); 159 rcu_read_unlock();
160 seq_printf (seq, "]"); 160 seq_putc(seq, ']');
161} 161}
162 162
163static int multipath_congested(struct mddev *mddev, int bits) 163static int multipath_congested(struct mddev *mddev, int bits)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index bc67ab6844f0..254e44e44668 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -801,6 +801,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
801 struct bio *bio; 801 struct bio *bio;
802 int ff = 0; 802 int ff = 0;
803 803
804 if (!page)
805 return;
806
804 if (test_bit(Faulty, &rdev->flags)) 807 if (test_bit(Faulty, &rdev->flags))
805 return; 808 return;
806 809
@@ -5452,6 +5455,7 @@ int md_run(struct mddev *mddev)
5452 * the only valid external interface is through the md 5455 * the only valid external interface is through the md
5453 * device. 5456 * device.
5454 */ 5457 */
5458 mddev->has_superblocks = false;
5455 rdev_for_each(rdev, mddev) { 5459 rdev_for_each(rdev, mddev) {
5456 if (test_bit(Faulty, &rdev->flags)) 5460 if (test_bit(Faulty, &rdev->flags))
5457 continue; 5461 continue;
@@ -5465,6 +5469,9 @@ int md_run(struct mddev *mddev)
5465 set_disk_ro(mddev->gendisk, 1); 5469 set_disk_ro(mddev->gendisk, 1);
5466 } 5470 }
5467 5471
5472 if (rdev->sb_page)
5473 mddev->has_superblocks = true;
5474
5468 /* perform some consistency tests on the device. 5475 /* perform some consistency tests on the device.
5469 * We don't want the data to overlap the metadata, 5476 * We don't want the data to overlap the metadata,
5470 * Internal Bitmap issues have been handled elsewhere. 5477 * Internal Bitmap issues have been handled elsewhere.
@@ -5497,8 +5504,10 @@ int md_run(struct mddev *mddev)
5497 } 5504 }
5498 if (mddev->sync_set == NULL) { 5505 if (mddev->sync_set == NULL) {
5499 mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 5506 mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5500 if (!mddev->sync_set) 5507 if (!mddev->sync_set) {
5501 return -ENOMEM; 5508 err = -ENOMEM;
5509 goto abort;
5510 }
5502 } 5511 }
5503 5512
5504 spin_lock(&pers_lock); 5513 spin_lock(&pers_lock);
@@ -5511,7 +5520,8 @@ int md_run(struct mddev *mddev)
5511 else 5520 else
5512 pr_warn("md: personality for level %s is not loaded!\n", 5521 pr_warn("md: personality for level %s is not loaded!\n",
5513 mddev->clevel); 5522 mddev->clevel);
5514 return -EINVAL; 5523 err = -EINVAL;
5524 goto abort;
5515 } 5525 }
5516 spin_unlock(&pers_lock); 5526 spin_unlock(&pers_lock);
5517 if (mddev->level != pers->level) { 5527 if (mddev->level != pers->level) {
@@ -5524,7 +5534,8 @@ int md_run(struct mddev *mddev)
5524 pers->start_reshape == NULL) { 5534 pers->start_reshape == NULL) {
5525 /* This personality cannot handle reshaping... */ 5535 /* This personality cannot handle reshaping... */
5526 module_put(pers->owner); 5536 module_put(pers->owner);
5527 return -EINVAL; 5537 err = -EINVAL;
5538 goto abort;
5528 } 5539 }
5529 5540
5530 if (pers->sync_request) { 5541 if (pers->sync_request) {
@@ -5593,7 +5604,7 @@ int md_run(struct mddev *mddev)
5593 mddev->private = NULL; 5604 mddev->private = NULL;
5594 module_put(pers->owner); 5605 module_put(pers->owner);
5595 bitmap_destroy(mddev); 5606 bitmap_destroy(mddev);
5596 return err; 5607 goto abort;
5597 } 5608 }
5598 if (mddev->queue) { 5609 if (mddev->queue) {
5599 bool nonrot = true; 5610 bool nonrot = true;
@@ -5655,6 +5666,18 @@ int md_run(struct mddev *mddev)
5655 sysfs_notify_dirent_safe(mddev->sysfs_action); 5666 sysfs_notify_dirent_safe(mddev->sysfs_action);
5656 sysfs_notify(&mddev->kobj, NULL, "degraded"); 5667 sysfs_notify(&mddev->kobj, NULL, "degraded");
5657 return 0; 5668 return 0;
5669
5670abort:
5671 if (mddev->bio_set) {
5672 bioset_free(mddev->bio_set);
5673 mddev->bio_set = NULL;
5674 }
5675 if (mddev->sync_set) {
5676 bioset_free(mddev->sync_set);
5677 mddev->sync_set = NULL;
5678 }
5679
5680 return err;
5658} 5681}
5659EXPORT_SYMBOL_GPL(md_run); 5682EXPORT_SYMBOL_GPL(md_run);
5660 5683
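md_run() above now funnels every failure path through a single abort: label that frees both biosets, the standard kernel goto-unwind idiom for multi-step setup. A generic, runnable sketch of the pattern (names hypothetical):

#include <errno.h>
#include <stdlib.h>

struct two_res { void *a; void *b; };

static int setup_two_resources(struct two_res *c)
{
	int err;

	c->a = malloc(64);
	if (!c->a)
		return -ENOMEM;		/* nothing allocated yet: plain return */

	c->b = malloc(64);
	if (!c->b) {
		err = -ENOMEM;
		goto free_a;		/* unwind in reverse allocation order */
	}
	return 0;

free_a:
	free(c->a);
	c->a = NULL;
	return err;
}

Setting the freed pointers back to NULL, as the abort: block does for bio_set and sync_set, keeps a later retry of md_run() from double-freeing them.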
@@ -8049,6 +8072,7 @@ EXPORT_SYMBOL(md_done_sync);
8049bool md_write_start(struct mddev *mddev, struct bio *bi) 8072bool md_write_start(struct mddev *mddev, struct bio *bi)
8050{ 8073{
8051 int did_change = 0; 8074 int did_change = 0;
8075
8052 if (bio_data_dir(bi) != WRITE) 8076 if (bio_data_dir(bi) != WRITE)
8053 return true; 8077 return true;
8054 8078
@@ -8081,6 +8105,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
8081 rcu_read_unlock(); 8105 rcu_read_unlock();
8082 if (did_change) 8106 if (did_change)
8083 sysfs_notify_dirent_safe(mddev->sysfs_state); 8107 sysfs_notify_dirent_safe(mddev->sysfs_state);
8108 if (!mddev->has_superblocks)
8109 return true;
8084 wait_event(mddev->sb_wait, 8110 wait_event(mddev->sb_wait,
8085 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || 8111 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8086 mddev->suspended); 8112 mddev->suspended);
@@ -8543,6 +8569,19 @@ void md_do_sync(struct md_thread *thread)
8543 set_mask_bits(&mddev->sb_flags, 0, 8569 set_mask_bits(&mddev->sb_flags, 0,
8544 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); 8570 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
8545 8571
8572 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8573 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8574 mddev->delta_disks > 0 &&
8575 mddev->pers->finish_reshape &&
8576 mddev->pers->size &&
8577 mddev->queue) {
8578 mddev_lock_nointr(mddev);
8579 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
8580 mddev_unlock(mddev);
8581 set_capacity(mddev->gendisk, mddev->array_sectors);
8582 revalidate_disk(mddev->gendisk);
8583 }
8584
8546 spin_lock(&mddev->lock); 8585 spin_lock(&mddev->lock);
8547 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8586 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8548 /* We completed so min/max setting can be forgotten if used. */ 8587 /* We completed so min/max setting can be forgotten if used. */
@@ -8569,6 +8608,10 @@ static int remove_and_add_spares(struct mddev *mddev,
8569 int removed = 0; 8608 int removed = 0;
8570 bool remove_some = false; 8609 bool remove_some = false;
8571 8610
8611 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
8612 /* Mustn't remove devices when resync thread is running */
8613 return 0;
8614
8572 rdev_for_each(rdev, mddev) { 8615 rdev_for_each(rdev, mddev) {
8573 if ((this == NULL || rdev == this) && 8616 if ((this == NULL || rdev == this) &&
8574 rdev->raid_disk >= 0 && 8617 rdev->raid_disk >= 0 &&
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 58cd20a5e85e..fbc925cce810 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -468,6 +468,8 @@ struct mddev {
468 void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); 468 void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
469 struct md_cluster_info *cluster_info; 469 struct md_cluster_info *cluster_info;
470 unsigned int good_device_nr; /* good device num within cluster raid */ 470 unsigned int good_device_nr; /* good device num within cluster raid */
471
472 bool has_superblocks:1;
471}; 473};
472 474
473enum recovery_flags { 475enum recovery_flags {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f978eddc7a21..fe872dc6712e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1809,6 +1809,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1809 struct md_rdev *repl = 1809 struct md_rdev *repl =
1810 conf->mirrors[conf->raid_disks + number].rdev; 1810 conf->mirrors[conf->raid_disks + number].rdev;
1811 freeze_array(conf, 0); 1811 freeze_array(conf, 0);
1812 if (atomic_read(&repl->nr_pending)) {
 1813 /* Some queued IO from the retry_list still
 1814 * holds repl, so we must not set the
 1815 * replacement to NULL; that could cause a
 1816 * NULL rdev dereference in sync_request_write
 1817 * and handle_write_finished.
1818 */
1819 err = -EBUSY;
1820 unfreeze_array(conf);
1821 goto abort;
1822 }
1812 clear_bit(Replacement, &repl->flags); 1823 clear_bit(Replacement, &repl->flags);
1813 p->rdev = repl; 1824 p->rdev = repl;
1814 conf->mirrors[conf->raid_disks + number].rdev = NULL; 1825 conf->mirrors[conf->raid_disks + number].rdev = NULL;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c7294e7557e0..eb84bc68e2fd 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -26,6 +26,18 @@
26#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - ilog2(sizeof(atomic_t))) 26#define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
27#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS) 27#define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS)
28 28
29/* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk.
30 * There are three safe ways to access raid1_info.rdev.
31 * 1/ when holding mddev->reconfig_mutex
32 * 2/ when resync/recovery is known to be happening - i.e. in code that is
33 * called as part of performing resync/recovery.
34 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
35 * and if it is non-NULL, increment rdev->nr_pending before dropping the
36 * RCU lock.
 37 * When .rdev is set to NULL, the nr_pending count is checked again and if it has
38 * been incremented, the pointer is put back in .rdev.
39 */
40
29struct raid1_info { 41struct raid1_info {
30 struct md_rdev *rdev; 42 struct md_rdev *rdev;
31 sector_t head_position; 43 sector_t head_position;
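The new raid1.h comment's rule 3 is the pattern raid1.c uses on its hot paths; as a sketch (helper name hypothetical, standard kernel RCU/atomics API):

/* Pin an rdev that may be NULLed concurrently by raid1_remove_disk(). */
static struct md_rdev *pin_rdev(struct raid1_info *mirror)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(mirror->rdev);
	if (rdev)
		atomic_inc(&rdev->nr_pending);	/* pin before dropping RCU */
	rcu_read_unlock();

	return rdev;	/* caller releases with rdev_dec_pending() */
}

The elevated nr_pending is exactly what the raid1_remove_disk() hunk above tests before detaching a replacement, so the two sides of the protocol agree.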
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 99c9207899a7..c5e6c60fc0d4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -141,7 +141,7 @@ static void r10bio_pool_free(void *r10_bio, void *data)
141#define RESYNC_WINDOW (1024*1024) 141#define RESYNC_WINDOW (1024*1024)
142/* maximum number of concurrent requests, memory permitting */ 142/* maximum number of concurrent requests, memory permitting */
143#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) 143#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
144#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) 144#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
145#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) 145#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
146 146
147/* 147/*
@@ -2655,7 +2655,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2655 for (m = 0; m < conf->copies; m++) { 2655 for (m = 0; m < conf->copies; m++) {
2656 int dev = r10_bio->devs[m].devnum; 2656 int dev = r10_bio->devs[m].devnum;
2657 rdev = conf->mirrors[dev].rdev; 2657 rdev = conf->mirrors[dev].rdev;
2658 if (r10_bio->devs[m].bio == NULL) 2658 if (r10_bio->devs[m].bio == NULL ||
2659 r10_bio->devs[m].bio->bi_end_io == NULL)
2659 continue; 2660 continue;
2660 if (!r10_bio->devs[m].bio->bi_status) { 2661 if (!r10_bio->devs[m].bio->bi_status) {
2661 rdev_clear_badblocks( 2662 rdev_clear_badblocks(
@@ -2670,7 +2671,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2670 md_error(conf->mddev, rdev); 2671 md_error(conf->mddev, rdev);
2671 } 2672 }
2672 rdev = conf->mirrors[dev].replacement; 2673 rdev = conf->mirrors[dev].replacement;
2673 if (r10_bio->devs[m].repl_bio == NULL) 2674 if (r10_bio->devs[m].repl_bio == NULL ||
2675 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2674 continue; 2676 continue;
2675 2677
2676 if (!r10_bio->devs[m].repl_bio->bi_status) { 2678 if (!r10_bio->devs[m].repl_bio->bi_status) {
@@ -3782,7 +3784,7 @@ static int raid10_run(struct mddev *mddev)
3782 if (fc > 1 || fo > 0) { 3784 if (fc > 1 || fo > 0) {
3783 pr_err("only near layout is supported by clustered" 3785 pr_err("only near layout is supported by clustered"
3784 " raid10\n"); 3786 " raid10\n");
3785 goto out; 3787 goto out_free_conf;
3786 } 3788 }
3787 } 3789 }
3788 3790
@@ -4830,17 +4832,11 @@ static void raid10_finish_reshape(struct mddev *mddev)
4830 return; 4832 return;
4831 4833
4832 if (mddev->delta_disks > 0) { 4834 if (mddev->delta_disks > 0) {
4833 sector_t size = raid10_size(mddev, 0, 0);
4834 md_set_array_sectors(mddev, size);
4835 if (mddev->recovery_cp > mddev->resync_max_sectors) { 4835 if (mddev->recovery_cp > mddev->resync_max_sectors) {
4836 mddev->recovery_cp = mddev->resync_max_sectors; 4836 mddev->recovery_cp = mddev->resync_max_sectors;
4837 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4837 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4838 } 4838 }
4839 mddev->resync_max_sectors = size; 4839 mddev->resync_max_sectors = mddev->array_sectors;
4840 if (mddev->queue) {
4841 set_capacity(mddev->gendisk, mddev->array_sectors);
4842 revalidate_disk(mddev->gendisk);
4843 }
4844 } else { 4840 } else {
4845 int d; 4841 int d;
4846 rcu_read_lock(); 4842 rcu_read_lock();
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index db2ac22ac1b4..e2e8840de9bf 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -2,6 +2,19 @@
2#ifndef _RAID10_H 2#ifndef _RAID10_H
3#define _RAID10_H 3#define _RAID10_H
4 4
5/* Note: raid10_info.rdev can be set to NULL asynchronously by
6 * raid10_remove_disk.
7 * There are three safe ways to access raid10_info.rdev.
8 * 1/ when holding mddev->reconfig_mutex
9 * 2/ when resync/recovery/reshape is known to be happening - i.e. in code
10 * that is called as part of performing resync/recovery/reshape.
11 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
12 * and if it is non-NULL, increment rdev->nr_pending before dropping the
13 * RCU lock.
 14 * When .rdev is set to NULL, the nr_pending count is checked again and if it has
15 * been incremented, the pointer is put back in .rdev.
16 */
17
5struct raid10_info { 18struct raid10_info {
6 struct md_rdev *rdev, *replacement; 19 struct md_rdev *rdev, *replacement;
7 sector_t head_position; 20 sector_t head_position;
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 0c76bcedfc1c..a001808a2b77 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -44,6 +44,7 @@ extern void ppl_write_stripe_run(struct r5conf *conf);
44extern void ppl_stripe_write_finished(struct stripe_head *sh); 44extern void ppl_stripe_write_finished(struct stripe_head *sh);
45extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); 45extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
46extern void ppl_quiesce(struct r5conf *conf, int quiesce); 46extern void ppl_quiesce(struct r5conf *conf, int quiesce);
47extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
47 48
48static inline bool raid5_has_ppl(struct r5conf *conf) 49static inline bool raid5_has_ppl(struct r5conf *conf)
49{ 50{
@@ -104,7 +105,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
104 if (conf->log) 105 if (conf->log)
105 ret = r5l_handle_flush_request(conf->log, bio); 106 ret = r5l_handle_flush_request(conf->log, bio);
106 else if (raid5_has_ppl(conf)) 107 else if (raid5_has_ppl(conf))
107 ret = 0; 108 ret = ppl_handle_flush_request(conf->log, bio);
108 109
109 return ret; 110 return ret;
110} 111}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 2764c2290062..42890a08375b 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -693,6 +693,16 @@ void ppl_quiesce(struct r5conf *conf, int quiesce)
693 } 693 }
694} 694}
695 695
696int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
697{
698 if (bio->bi_iter.bi_size == 0) {
699 bio_endio(bio);
700 return 0;
701 }
702 bio->bi_opf &= ~REQ_PREFLUSH;
703 return -EAGAIN;
704}
705
696void ppl_stripe_write_finished(struct stripe_head *sh) 706void ppl_stripe_write_finished(struct stripe_head *sh)
697{ 707{
698 struct ppl_io_unit *io; 708 struct ppl_io_unit *io;
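ppl_handle_flush_request() gives PPL an explicit flush policy: PPL has no volatile cache of its own to drain, so a zero-length PREFLUSH bio can be completed on the spot, while a flush-plus-data bio has REQ_PREFLUSH stripped and is bounced back with -EAGAIN so the caller resubmits it as an ordinary write. A userspace-compilable sketch of the same decision (types and flag value are stand-ins):

#include <errno.h>
#include <stdint.h>

#define REQ_PREFLUSH (1u << 0)		/* stand-in for the kernel flag */

struct fake_bio { uint32_t bi_opf; unsigned int bi_size; };

static int ppl_flush_sketch(struct fake_bio *bio)
{
	if (bio->bi_size == 0)
		return 0;		/* pure flush: complete it now */
	bio->bi_opf &= ~REQ_PREFLUSH;	/* drop the flush semantics... */
	return -EAGAIN;			/* ...and ask caller to resubmit */
}

int main(void)
{
	struct fake_bio flush = { REQ_PREFLUSH, 0 };
	struct fake_bio flush_data = { REQ_PREFLUSH, 4096 };

	/* 0: completed immediately; -EAGAIN: resubmit without PREFLUSH */
	return ppl_flush_sketch(&flush) == 0 &&
	       ppl_flush_sketch(&flush_data) == -EAGAIN ? 0 : 1;
}

Note that ppl_handle_flush_request() never dereferences its log argument, which is why the raid5-log.h dispatcher can pass conf->log even in PPL-only configurations where that pointer is not set up.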
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 50d01144b805..b5d2601483e3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2196,15 +2196,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
2196static int grow_stripes(struct r5conf *conf, int num) 2196static int grow_stripes(struct r5conf *conf, int num)
2197{ 2197{
2198 struct kmem_cache *sc; 2198 struct kmem_cache *sc;
2199 size_t namelen = sizeof(conf->cache_name[0]);
2199 int devs = max(conf->raid_disks, conf->previous_raid_disks); 2200 int devs = max(conf->raid_disks, conf->previous_raid_disks);
2200 2201
2201 if (conf->mddev->gendisk) 2202 if (conf->mddev->gendisk)
2202 sprintf(conf->cache_name[0], 2203 snprintf(conf->cache_name[0], namelen,
2203 "raid%d-%s", conf->level, mdname(conf->mddev)); 2204 "raid%d-%s", conf->level, mdname(conf->mddev));
2204 else 2205 else
2205 sprintf(conf->cache_name[0], 2206 snprintf(conf->cache_name[0], namelen,
2206 "raid%d-%p", conf->level, conf->mddev); 2207 "raid%d-%p", conf->level, conf->mddev);
2207 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); 2208 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
2208 2209
2209 conf->active_name = 0; 2210 conf->active_name = 0;
2210 sc = kmem_cache_create(conf->cache_name[conf->active_name], 2211 sc = kmem_cache_create(conf->cache_name[conf->active_name],
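The grow_stripes() hunk swaps sprintf() for bounded snprintf() into conf->cache_name (assumed here to be two 32-byte slots in r5conf), and the "%.27s" precision additionally caps the copied prefix so that "-alt" plus the terminator always fits: 27 + 4 + 1 = 32. A runnable illustration under that size assumption:

#include <stdio.h>

int main(void)
{
	char name[2][32];			/* mirrors conf->cache_name */
	size_t namelen = sizeof(name[0]);

	snprintf(name[0], namelen, "raid%d-%s", 6,
		 "a-rather-long-md-device-name");
	/* precision caps the prefix: 27 bytes + "-alt" + NUL == 32 */
	snprintf(name[1], namelen, "%.27s-alt", name[0]);

	printf("%s\n%s\n", name[0], name[1]);
	return 0;
}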
@@ -6764,9 +6765,7 @@ static void free_conf(struct r5conf *conf)
6764 6765
6765 log_exit(conf); 6766 log_exit(conf);
6766 6767
6767 if (conf->shrinker.nr_deferred) 6768 unregister_shrinker(&conf->shrinker);
6768 unregister_shrinker(&conf->shrinker);
6769
6770 free_thread_groups(conf); 6769 free_thread_groups(conf);
6771 shrink_stripes(conf); 6770 shrink_stripes(conf);
6772 raid5_free_percpu(conf); 6771 raid5_free_percpu(conf);
@@ -8001,13 +8000,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
8001 8000
8002 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 8001 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8003 8002
8004 if (mddev->delta_disks > 0) { 8003 if (mddev->delta_disks <= 0) {
8005 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
8006 if (mddev->queue) {
8007 set_capacity(mddev->gendisk, mddev->array_sectors);
8008 revalidate_disk(mddev->gendisk);
8009 }
8010 } else {
8011 int d; 8004 int d;
8012 spin_lock_irq(&conf->device_lock); 8005 spin_lock_irq(&conf->device_lock);
8013 mddev->degraded = raid5_calc_degraded(conf); 8006 mddev->degraded = raid5_calc_degraded(conf);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2e6123825095..3f8da26032ac 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -450,6 +450,18 @@ enum {
450 * HANDLE gets cleared if stripe_handle leaves nothing locked. 450 * HANDLE gets cleared if stripe_handle leaves nothing locked.
451 */ 451 */
452 452
453/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
454 * There are three safe ways to access disk_info.rdev.
455 * 1/ when holding mddev->reconfig_mutex
456 * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
457 * is called as part of performing resync/recovery/reshape.
458 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
459 * and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
460 * lock.
 461 * When .rdev is set to NULL, the nr_pending count is checked again and if
462 * it has been incremented, the pointer is put back in .rdev.
463 */
464
453struct disk_info { 465struct disk_info {
454 struct md_rdev *rdev, *replacement; 466 struct md_rdev *rdev, *replacement;
455 struct page *extra_page; /* extra page to use in prexor */ 467 struct page *extra_page; /* extra page to use in prexor */
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 145e12bfb819..372c074bb1b9 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -147,6 +147,8 @@ config DVB_CORE
147config DVB_MMAP 147config DVB_MMAP
148 bool "Enable DVB memory-mapped API (EXPERIMENTAL)" 148 bool "Enable DVB memory-mapped API (EXPERIMENTAL)"
149 depends on DVB_CORE 149 depends on DVB_CORE
150 depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE
151 select VIDEOBUF2_VMALLOC
150 default n 152 default n
151 help 153 help
152 This option enables DVB experimental memory-mapped API, with 154 This option enables DVB experimental memory-mapped API, with
diff --git a/drivers/media/common/videobuf2/Kconfig b/drivers/media/common/videobuf2/Kconfig
index 5df05250de94..17c32ea58395 100644
--- a/drivers/media/common/videobuf2/Kconfig
+++ b/drivers/media/common/videobuf2/Kconfig
@@ -3,6 +3,9 @@ config VIDEOBUF2_CORE
3 select DMA_SHARED_BUFFER 3 select DMA_SHARED_BUFFER
4 tristate 4 tristate
5 5
6config VIDEOBUF2_V4L2
7 tristate
8
6config VIDEOBUF2_MEMOPS 9config VIDEOBUF2_MEMOPS
7 tristate 10 tristate
8 select FRAME_VECTOR 11 select FRAME_VECTOR
diff --git a/drivers/media/common/videobuf2/Makefile b/drivers/media/common/videobuf2/Makefile
index 19de5ccda20b..77bebe8b202f 100644
--- a/drivers/media/common/videobuf2/Makefile
+++ b/drivers/media/common/videobuf2/Makefile
@@ -1,5 +1,12 @@
1# SPDX-License-Identifier: GPL-2.0
2videobuf2-common-objs := videobuf2-core.o
1 3
2obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o videobuf2-v4l2.o 4ifeq ($(CONFIG_TRACEPOINTS),y)
5 videobuf2-common-objs += vb2-trace.o
6endif
7
8obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o
9obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o
3obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o 10obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
4obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o 11obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
5obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o 12obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
diff --git a/drivers/media/v4l2-core/vb2-trace.c b/drivers/media/common/videobuf2/vb2-trace.c
index 4c0f39d271f0..4c0f39d271f0 100644
--- a/drivers/media/v4l2-core/vb2-trace.c
+++ b/drivers/media/common/videobuf2/vb2-trace.c
diff --git a/drivers/media/dvb-core/Makefile b/drivers/media/dvb-core/Makefile
index 3a105d82019a..62b028ded9f7 100644
--- a/drivers/media/dvb-core/Makefile
+++ b/drivers/media/dvb-core/Makefile
@@ -4,7 +4,7 @@
4# 4#
5 5
6dvb-net-$(CONFIG_DVB_NET) := dvb_net.o 6dvb-net-$(CONFIG_DVB_NET) := dvb_net.o
7dvb-vb2-$(CONFIG_DVB_MMSP) := dvb_vb2.o 7dvb-vb2-$(CONFIG_DVB_MMAP) := dvb_vb2.o
8 8
9dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o \ 9dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o \
10 dvb_ca_en50221.o dvb_frontend.o \ 10 dvb_ca_en50221.o dvb_frontend.o \
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 6d53af00190e..61a750fae465 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -128,11 +128,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
128 struct dvb_device *dvbdev = file->private_data; 128 struct dvb_device *dvbdev = file->private_data;
129 struct dmxdev *dmxdev = dvbdev->priv; 129 struct dmxdev *dmxdev = dvbdev->priv;
130 struct dmx_frontend *front; 130 struct dmx_frontend *front;
131#ifndef DVB_MMAP
132 bool need_ringbuffer = false; 131 bool need_ringbuffer = false;
133#else
134 const bool need_ringbuffer = true;
135#endif
136 132
137 dprintk("%s\n", __func__); 133 dprintk("%s\n", __func__);
138 134
@@ -144,17 +140,31 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
144 return -ENODEV; 140 return -ENODEV;
145 } 141 }
146 142
147#ifndef DVB_MMAP 143 dmxdev->may_do_mmap = 0;
144
145 /*
146 * The logic here is a little tricky due to the ifdef.
147 *
148 * The ringbuffer is used for both read and mmap.
149 *
 150 * It is not needed, however, in two situations:
151 * - Write devices (access with O_WRONLY);
152 * - For duplex device nodes, opened with O_RDWR.
153 */
154
148 if ((file->f_flags & O_ACCMODE) == O_RDONLY) 155 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
149 need_ringbuffer = true; 156 need_ringbuffer = true;
150#else 157 else if ((file->f_flags & O_ACCMODE) == O_RDWR) {
151 if ((file->f_flags & O_ACCMODE) == O_RDWR) {
152 if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) { 158 if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
159#ifdef CONFIG_DVB_MMAP
160 dmxdev->may_do_mmap = 1;
161 need_ringbuffer = true;
162#else
153 mutex_unlock(&dmxdev->mutex); 163 mutex_unlock(&dmxdev->mutex);
154 return -EOPNOTSUPP; 164 return -EOPNOTSUPP;
165#endif
155 } 166 }
156 } 167 }
157#endif
158 168
159 if (need_ringbuffer) { 169 if (need_ringbuffer) {
160 void *mem; 170 void *mem;
@@ -169,8 +179,9 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
169 return -ENOMEM; 179 return -ENOMEM;
170 } 180 }
171 dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); 181 dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
172 dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr", 182 if (dmxdev->may_do_mmap)
173 file->f_flags & O_NONBLOCK); 183 dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr",
184 file->f_flags & O_NONBLOCK);
174 dvbdev->readers--; 185 dvbdev->readers--;
175 } 186 }
176 187
@@ -200,11 +211,6 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
200{ 211{
201 struct dvb_device *dvbdev = file->private_data; 212 struct dvb_device *dvbdev = file->private_data;
202 struct dmxdev *dmxdev = dvbdev->priv; 213 struct dmxdev *dmxdev = dvbdev->priv;
203#ifndef DVB_MMAP
204 bool need_ringbuffer = false;
205#else
206 const bool need_ringbuffer = true;
207#endif
208 214
209 mutex_lock(&dmxdev->mutex); 215 mutex_lock(&dmxdev->mutex);
210 216
@@ -213,15 +219,14 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
213 dmxdev->demux->connect_frontend(dmxdev->demux, 219 dmxdev->demux->connect_frontend(dmxdev->demux,
214 dmxdev->dvr_orig_fe); 220 dmxdev->dvr_orig_fe);
215 } 221 }
216#ifndef DVB_MMAP
217 if ((file->f_flags & O_ACCMODE) == O_RDONLY)
218 need_ringbuffer = true;
219#endif
220 222
221 if (need_ringbuffer) { 223 if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
222 if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) 224 dmxdev->may_do_mmap) {
223 dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx); 225 if (dmxdev->may_do_mmap) {
224 dvb_vb2_release(&dmxdev->dvr_vb2_ctx); 226 if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
227 dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx);
228 dvb_vb2_release(&dmxdev->dvr_vb2_ctx);
229 }
225 dvbdev->readers++; 230 dvbdev->readers++;
226 if (dmxdev->dvr_buffer.data) { 231 if (dmxdev->dvr_buffer.data) {
227 void *mem = dmxdev->dvr_buffer.data; 232 void *mem = dmxdev->dvr_buffer.data;
@@ -380,7 +385,8 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
380 385
381static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, 386static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
382 const u8 *buffer2, size_t buffer2_len, 387 const u8 *buffer2, size_t buffer2_len,
383 struct dmx_section_filter *filter) 388 struct dmx_section_filter *filter,
389 u32 *buffer_flags)
384{ 390{
385 struct dmxdev_filter *dmxdevfilter = filter->priv; 391 struct dmxdev_filter *dmxdevfilter = filter->priv;
386 int ret; 392 int ret;
@@ -399,10 +405,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
399 dprintk("section callback %*ph\n", 6, buffer1); 405 dprintk("section callback %*ph\n", 6, buffer1);
400 if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) { 406 if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) {
401 ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, 407 ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
402 buffer1, buffer1_len); 408 buffer1, buffer1_len,
409 buffer_flags);
403 if (ret == buffer1_len) 410 if (ret == buffer1_len)
404 ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, 411 ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
405 buffer2, buffer2_len); 412 buffer2, buffer2_len,
413 buffer_flags);
406 } else { 414 } else {
407 ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, 415 ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,
408 buffer1, buffer1_len); 416 buffer1, buffer1_len);
@@ -422,11 +430,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
422 430
423static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, 431static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
424 const u8 *buffer2, size_t buffer2_len, 432 const u8 *buffer2, size_t buffer2_len,
425 struct dmx_ts_feed *feed) 433 struct dmx_ts_feed *feed,
434 u32 *buffer_flags)
426{ 435{
427 struct dmxdev_filter *dmxdevfilter = feed->priv; 436 struct dmxdev_filter *dmxdevfilter = feed->priv;
428 struct dvb_ringbuffer *buffer; 437 struct dvb_ringbuffer *buffer;
429#ifdef DVB_MMAP 438#ifdef CONFIG_DVB_MMAP
430 struct dvb_vb2_ctx *ctx; 439 struct dvb_vb2_ctx *ctx;
431#endif 440#endif
432 int ret; 441 int ret;
@@ -440,20 +449,22 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
440 if (dmxdevfilter->params.pes.output == DMX_OUT_TAP || 449 if (dmxdevfilter->params.pes.output == DMX_OUT_TAP ||
441 dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) { 450 dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
442 buffer = &dmxdevfilter->buffer; 451 buffer = &dmxdevfilter->buffer;
443#ifdef DVB_MMAP 452#ifdef CONFIG_DVB_MMAP
444 ctx = &dmxdevfilter->vb2_ctx; 453 ctx = &dmxdevfilter->vb2_ctx;
445#endif 454#endif
446 } else { 455 } else {
447 buffer = &dmxdevfilter->dev->dvr_buffer; 456 buffer = &dmxdevfilter->dev->dvr_buffer;
448#ifdef DVB_MMAP 457#ifdef CONFIG_DVB_MMAP
449 ctx = &dmxdevfilter->dev->dvr_vb2_ctx; 458 ctx = &dmxdevfilter->dev->dvr_vb2_ctx;
450#endif 459#endif
451 } 460 }
452 461
453 if (dvb_vb2_is_streaming(ctx)) { 462 if (dvb_vb2_is_streaming(ctx)) {
454 ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len); 463 ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len,
464 buffer_flags);
455 if (ret == buffer1_len) 465 if (ret == buffer1_len)
456 ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len); 466 ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len,
467 buffer_flags);
457 } else { 468 } else {
458 if (buffer->error) { 469 if (buffer->error) {
459 spin_unlock(&dmxdevfilter->dev->lock); 470 spin_unlock(&dmxdevfilter->dev->lock);
@@ -802,6 +813,12 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
802 mutex_init(&dmxdevfilter->mutex); 813 mutex_init(&dmxdevfilter->mutex);
803 file->private_data = dmxdevfilter; 814 file->private_data = dmxdevfilter;
804 815
816#ifdef CONFIG_DVB_MMAP
817 dmxdev->may_do_mmap = 1;
818#else
819 dmxdev->may_do_mmap = 0;
820#endif
821
805 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); 822 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
806 dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter", 823 dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter",
807 file->f_flags & O_NONBLOCK); 824 file->f_flags & O_NONBLOCK);
@@ -1111,7 +1128,7 @@ static int dvb_demux_do_ioctl(struct file *file,
1111 mutex_unlock(&dmxdevfilter->mutex); 1128 mutex_unlock(&dmxdevfilter->mutex);
1112 break; 1129 break;
1113 1130
1114#ifdef DVB_MMAP 1131#ifdef CONFIG_DVB_MMAP
1115 case DMX_REQBUFS: 1132 case DMX_REQBUFS:
1116 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { 1133 if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
1117 mutex_unlock(&dmxdev->mutex); 1134 mutex_unlock(&dmxdev->mutex);
@@ -1160,7 +1177,7 @@ static int dvb_demux_do_ioctl(struct file *file,
1160 break; 1177 break;
1161#endif 1178#endif
1162 default: 1179 default:
1163 ret = -EINVAL; 1180 ret = -ENOTTY;
1164 break; 1181 break;
1165 } 1182 }
1166 mutex_unlock(&dmxdev->mutex); 1183 mutex_unlock(&dmxdev->mutex);
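The default cases here and again in dvb_dvr_do_ioctl() below now return -ENOTTY rather than -EINVAL: by convention -ENOTTY means "this ioctl does not exist on this device", while -EINVAL means "known ioctl, bad argument", and the distinction matters to userspace probing for optional features such as the mmap API. A small sketch of the convention:

#include <errno.h>
#include <stdio.h>

static long demo_ioctl(unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case 1:				/* a recognized command */
		if (arg == 0)
			return -EINVAL;	/* valid cmd, invalid argument */
		return 0;
	default:
		return -ENOTTY;		/* cmd unsupported by this device */
	}
}

int main(void)
{
	printf("%ld %ld %ld\n", demo_ioctl(1, 42), demo_ioctl(1, 0),
	       demo_ioctl(2, 0));	/* 0, -EINVAL, -ENOTTY */
	return 0;
}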
@@ -1199,13 +1216,16 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
1199 return mask; 1216 return mask;
1200} 1217}
1201 1218
1202#ifdef DVB_MMAP 1219#ifdef CONFIG_DVB_MMAP
1203static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma) 1220static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma)
1204{ 1221{
1205 struct dmxdev_filter *dmxdevfilter = file->private_data; 1222 struct dmxdev_filter *dmxdevfilter = file->private_data;
1206 struct dmxdev *dmxdev = dmxdevfilter->dev; 1223 struct dmxdev *dmxdev = dmxdevfilter->dev;
1207 int ret; 1224 int ret;
1208 1225
1226 if (!dmxdev->may_do_mmap)
1227 return -ENOTTY;
1228
1209 if (mutex_lock_interruptible(&dmxdev->mutex)) 1229 if (mutex_lock_interruptible(&dmxdev->mutex))
1210 return -ERESTARTSYS; 1230 return -ERESTARTSYS;
1211 1231
@@ -1249,7 +1269,7 @@ static const struct file_operations dvb_demux_fops = {
1249 .release = dvb_demux_release, 1269 .release = dvb_demux_release,
1250 .poll = dvb_demux_poll, 1270 .poll = dvb_demux_poll,
1251 .llseek = default_llseek, 1271 .llseek = default_llseek,
1252#ifdef DVB_MMAP 1272#ifdef CONFIG_DVB_MMAP
1253 .mmap = dvb_demux_mmap, 1273 .mmap = dvb_demux_mmap,
1254#endif 1274#endif
1255}; 1275};
@@ -1280,7 +1300,7 @@ static int dvb_dvr_do_ioctl(struct file *file,
1280 ret = dvb_dvr_set_buffer_size(dmxdev, arg); 1300 ret = dvb_dvr_set_buffer_size(dmxdev, arg);
1281 break; 1301 break;
1282 1302
1283#ifdef DVB_MMAP 1303#ifdef CONFIG_DVB_MMAP
1284 case DMX_REQBUFS: 1304 case DMX_REQBUFS:
1285 ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg); 1305 ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg);
1286 break; 1306 break;
@@ -1304,7 +1324,7 @@ static int dvb_dvr_do_ioctl(struct file *file,
1304 break; 1324 break;
1305#endif 1325#endif
1306 default: 1326 default:
1307 ret = -EINVAL; 1327 ret = -ENOTTY;
1308 break; 1328 break;
1309 } 1329 }
1310 mutex_unlock(&dmxdev->mutex); 1330 mutex_unlock(&dmxdev->mutex);
@@ -1322,11 +1342,6 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
1322 struct dvb_device *dvbdev = file->private_data; 1342 struct dvb_device *dvbdev = file->private_data;
1323 struct dmxdev *dmxdev = dvbdev->priv; 1343 struct dmxdev *dmxdev = dvbdev->priv;
1324 __poll_t mask = 0; 1344 __poll_t mask = 0;
1325#ifndef DVB_MMAP
1326 bool need_ringbuffer = false;
1327#else
1328 const bool need_ringbuffer = true;
1329#endif
1330 1345
1331 dprintk("%s\n", __func__); 1346 dprintk("%s\n", __func__);
1332 1347
@@ -1337,11 +1352,8 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
1337 1352
1338 poll_wait(file, &dmxdev->dvr_buffer.queue, wait); 1353 poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
1339 1354
1340#ifndef DVB_MMAP 1355 if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
1341 if ((file->f_flags & O_ACCMODE) == O_RDONLY) 1356 dmxdev->may_do_mmap) {
1342 need_ringbuffer = true;
1343#endif
1344 if (need_ringbuffer) {
1345 if (dmxdev->dvr_buffer.error) 1357 if (dmxdev->dvr_buffer.error)
1346 mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); 1358 mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);
1347 1359
@@ -1353,13 +1365,16 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
1353 return mask; 1365 return mask;
1354} 1366}
1355 1367
1356#ifdef DVB_MMAP 1368#ifdef CONFIG_DVB_MMAP
1357static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma) 1369static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma)
1358{ 1370{
1359 struct dvb_device *dvbdev = file->private_data; 1371 struct dvb_device *dvbdev = file->private_data;
1360 struct dmxdev *dmxdev = dvbdev->priv; 1372 struct dmxdev *dmxdev = dvbdev->priv;
1361 int ret; 1373 int ret;
1362 1374
1375 if (!dmxdev->may_do_mmap)
1376 return -ENOTTY;
1377
1363 if (dmxdev->exit) 1378 if (dmxdev->exit)
1364 return -ENODEV; 1379 return -ENODEV;
1365 1380
@@ -1381,7 +1396,7 @@ static const struct file_operations dvb_dvr_fops = {
1381 .release = dvb_dvr_release, 1396 .release = dvb_dvr_release,
1382 .poll = dvb_dvr_poll, 1397 .poll = dvb_dvr_poll,
1383 .llseek = default_llseek, 1398 .llseek = default_llseek,
1384#ifdef DVB_MMAP 1399#ifdef CONFIG_DVB_MMAP
1385 .mmap = dvb_dvr_mmap, 1400 .mmap = dvb_dvr_mmap,
1386#endif 1401#endif
1387}; 1402};
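Several guards in dmxdev.c tested #ifdef DVB_MMAP, but a Kconfig symbol reaches C code only with a CONFIG_ prefix (CONFIG_DVB_MMAP, or CONFIG_DVB_MMAP_MODULE for =m), so the unprefixed spelling was never defined and the mmap paths were silently compiled out. A compilable illustration:

#include <stdio.h>

int main(void)
{
#ifdef DVB_MMAP				/* wrong: never defined by Kconfig */
	puts("unreachable with the old spelling");
#endif
#ifdef CONFIG_DVB_MMAP			/* correct: matches "config DVB_MMAP" */
	puts("mmap support compiled in");
#else
	puts("mmap support compiled out");
#endif
	return 0;
}

The run-time dmxdev->may_do_mmap flag added alongside this lets a single kernel image refuse mmap cleanly (-ENOTTY) on device nodes where no ringbuffer was set up.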
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 210eed0269b0..f45091246bdc 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -55,6 +55,17 @@ MODULE_PARM_DESC(dvb_demux_feed_err_pkts,
55 dprintk(x); \ 55 dprintk(x); \
56} while (0) 56} while (0)
57 57
58#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
59# define dprintk_sect_loss(x...) dprintk(x)
60#else
61# define dprintk_sect_loss(x...)
62#endif
63
64#define set_buf_flags(__feed, __flag) \
65 do { \
66 (__feed)->buffer_flags |= (__flag); \
67 } while (0)
68
58/****************************************************************************** 69/******************************************************************************
59 * static inlined helper functions 70 * static inlined helper functions
60 ******************************************************************************/ 71 ******************************************************************************/
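With set_buf_flags(), dvb_demux stops merely logging stream anomalies and records them per feed: continuity errors, CRC32 discards, and TEI set bits in feed->buffer_flags, which every feed callback now forwards so dvb_vb2 can surface them to userspace in the dequeued dmx_buffer. A minimal model of that accumulate-then-latch flow (flag names and values illustrative, not the UAPI encoding):

#include <stdint.h>
#include <stdio.h>

#define FLAG_TEI		(1u << 1)
#define FLAG_DISCONTINUITY	(1u << 2)

struct feed { uint32_t buffer_flags; };

static void set_buf_flags(struct feed *f, uint32_t flag)
{
	f->buffer_flags |= flag;	/* producer: OR in each anomaly */
}

static uint32_t latch_flags(struct feed *f)
{
	uint32_t out = f->buffer_flags;

	f->buffer_flags = 0;		/* consumer: report each event once */
	return out;
}

int main(void)
{
	struct feed f = { 0 };

	set_buf_flags(&f, FLAG_TEI);
	printf("dqbuf flags=%#x\n", latch_flags(&f));	/* 0x2 */
	printf("dqbuf flags=%#x\n", latch_flags(&f));	/* 0 */
	return 0;
}

The dvb_vb2_dqbuf() hunk further down performs this latch under ctx->slock, pairing with dvb_vb2_fill_buffer(), which ORs the forwarded flags in under the same lock.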
@@ -104,31 +115,30 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
104{ 115{
105 int count = payload(buf); 116 int count = payload(buf);
106 int p; 117 int p;
107#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
108 int ccok; 118 int ccok;
109 u8 cc; 119 u8 cc;
110#endif
111 120
112 if (count == 0) 121 if (count == 0)
113 return -1; 122 return -1;
114 123
115 p = 188 - count; 124 p = 188 - count;
116 125
117#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
118 cc = buf[3] & 0x0f; 126 cc = buf[3] & 0x0f;
119 ccok = ((feed->cc + 1) & 0x0f) == cc; 127 ccok = ((feed->cc + 1) & 0x0f) == cc;
120 feed->cc = cc; 128 feed->cc = cc;
121 if (!ccok) 129 if (!ccok) {
122 dprintk("missed packet: %d instead of %d!\n", 130 set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
123 cc, (feed->cc + 1) & 0x0f); 131 dprintk_sect_loss("missed packet: %d instead of %d!\n",
124#endif 132 cc, (feed->cc + 1) & 0x0f);
133 }
125 134
126 if (buf[1] & 0x40) // PUSI ? 135 if (buf[1] & 0x40) // PUSI ?
127 feed->peslen = 0xfffa; 136 feed->peslen = 0xfffa;
128 137
129 feed->peslen += count; 138 feed->peslen += count;
130 139
131 return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts); 140 return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts,
141 &feed->buffer_flags);
132} 142}
133 143
134static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, 144static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
@@ -150,7 +160,7 @@ static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
150 return 0; 160 return 0;
151 161
152 return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen, 162 return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen,
153 NULL, 0, &f->filter); 163 NULL, 0, &f->filter, &feed->buffer_flags);
154} 164}
155 165
156static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) 166static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
@@ -169,8 +179,10 @@ static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
169 if (sec->check_crc) { 179 if (sec->check_crc) {
170 section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0); 180 section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0);
171 if (section_syntax_indicator && 181 if (section_syntax_indicator &&
172 demux->check_crc32(feed, sec->secbuf, sec->seclen)) 182 demux->check_crc32(feed, sec->secbuf, sec->seclen)) {
183 set_buf_flags(feed, DMX_BUFFER_FLAG_HAD_CRC32_DISCARD);
173 return -1; 184 return -1;
185 }
174 } 186 }
175 187
176 do { 188 do {
@@ -187,7 +199,6 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
187{ 199{
188 struct dmx_section_feed *sec = &feed->feed.sec; 200 struct dmx_section_feed *sec = &feed->feed.sec;
189 201
190#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
191 if (sec->secbufp < sec->tsfeedp) { 202 if (sec->secbufp < sec->tsfeedp) {
192 int n = sec->tsfeedp - sec->secbufp; 203 int n = sec->tsfeedp - sec->secbufp;
193 204
@@ -197,12 +208,13 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
197 * but just first and last. 208 * but just first and last.
198 */ 209 */
199 if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) { 210 if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) {
200 dprintk("section ts padding loss: %d/%d\n", 211 set_buf_flags(feed,
201 n, sec->tsfeedp); 212 DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
202 dprintk("pad data: %*ph\n", n, sec->secbuf); 213 dprintk_sect_loss("section ts padding loss: %d/%d\n",
214 n, sec->tsfeedp);
215 dprintk_sect_loss("pad data: %*ph\n", n, sec->secbuf);
203 } 216 }
204 } 217 }
205#endif
206 218
207 sec->tsfeedp = sec->secbufp = sec->seclen = 0; 219 sec->tsfeedp = sec->secbufp = sec->seclen = 0;
208 sec->secbuf = sec->secbuf_base; 220 sec->secbuf = sec->secbuf_base;
@@ -237,11 +249,10 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
237 return 0; 249 return 0;
238 250
239 if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) { 251 if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) {
240#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG 252 set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
241 dprintk("section buffer full loss: %d/%d\n", 253 dprintk_sect_loss("section buffer full loss: %d/%d\n",
242 sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE, 254 sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE,
243 DMX_MAX_SECFEED_SIZE); 255 DMX_MAX_SECFEED_SIZE);
244#endif
245 len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp; 256 len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp;
246 } 257 }
247 258
@@ -269,12 +280,13 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
269 sec->seclen = seclen; 280 sec->seclen = seclen;
270 sec->crc_val = ~0; 281 sec->crc_val = ~0;
271 /* dump [secbuf .. secbuf+seclen) */ 282 /* dump [secbuf .. secbuf+seclen) */
272 if (feed->pusi_seen) 283 if (feed->pusi_seen) {
273 dvb_dmx_swfilter_section_feed(feed); 284 dvb_dmx_swfilter_section_feed(feed);
274#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG 285 } else {
275 else 286 set_buf_flags(feed,
276 dprintk("pusi not seen, discarding section data\n"); 287 DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
277#endif 288 dprintk_sect_loss("pusi not seen, discarding section data\n");
289 }
278 sec->secbufp += seclen; /* secbufp and secbuf moving together is */ 290 sec->secbufp += seclen; /* secbufp and secbuf moving together is */
279 sec->secbuf += seclen; /* redundant but saves pointer arithmetic */ 291 sec->secbuf += seclen; /* redundant but saves pointer arithmetic */
280 } 292 }
@@ -307,18 +319,22 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
307 } 319 }
308 320
309 if (!ccok || dc_i) { 321 if (!ccok || dc_i) {
310#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG 322 if (dc_i) {
311 if (dc_i) 323 set_buf_flags(feed,
312 dprintk("%d frame with disconnect indicator\n", 324 DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR);
325 dprintk_sect_loss("%d frame with disconnect indicator\n",
313 cc); 326 cc);
314 else 327 } else {
315 dprintk("discontinuity: %d instead of %d. %d bytes lost\n", 328 set_buf_flags(feed,
329 DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
330 dprintk_sect_loss("discontinuity: %d instead of %d. %d bytes lost\n",
316 cc, (feed->cc + 1) & 0x0f, count + 4); 331 cc, (feed->cc + 1) & 0x0f, count + 4);
332 }
317 /* 333 /*
318 * those bytes under sume circumstances will again be reported 334 * those bytes under some circumstances will again be reported
319 * in the following dvb_dmx_swfilter_section_new 335 * in the following dvb_dmx_swfilter_section_new
320 */ 336 */
321#endif 337
322 /* 338 /*
323 * Discontinuity detected. Reset pusi_seen to 339 * Discontinuity detected. Reset pusi_seen to
324 * stop feeding of suspicious data until next PUSI=1 arrives 340 * stop feeding of suspicious data until next PUSI=1 arrives
@@ -326,6 +342,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
326 * FIXME: does it make sense if the MPEG-TS is the one 342 * FIXME: does it make sense if the MPEG-TS is the one
327 * reporting discontinuity? 343 * reporting discontinuity?
328 */ 344 */
345
329 feed->pusi_seen = false; 346 feed->pusi_seen = false;
330 dvb_dmx_swfilter_section_new(feed); 347 dvb_dmx_swfilter_section_new(feed);
331 } 348 }
@@ -345,11 +362,11 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
345 dvb_dmx_swfilter_section_new(feed); 362 dvb_dmx_swfilter_section_new(feed);
346 dvb_dmx_swfilter_section_copy_dump(feed, after, 363 dvb_dmx_swfilter_section_copy_dump(feed, after,
347 after_len); 364 after_len);
365 } else if (count > 0) {
366 set_buf_flags(feed,
367 DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
368 dprintk_sect_loss("PUSI=1 but %d bytes lost\n", count);
348 } 369 }
349#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
350 else if (count > 0)
351 dprintk("PUSI=1 but %d bytes lost\n", count);
352#endif
353 } else { 370 } else {
354 /* PUSI=0 (is not set), no section boundary */ 371 /* PUSI=0 (is not set), no section boundary */
355 dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count); 372 dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count);
@@ -369,7 +386,8 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
369 if (feed->ts_type & TS_PAYLOAD_ONLY) 386 if (feed->ts_type & TS_PAYLOAD_ONLY)
370 dvb_dmx_swfilter_payload(feed, buf); 387 dvb_dmx_swfilter_payload(feed, buf);
371 else 388 else
372 feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); 389 feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts,
390 &feed->buffer_flags);
373 } 391 }
374 /* Used only on full-featured devices */ 392 /* Used only on full-featured devices */
375 if (feed->ts_type & TS_DECODER) 393 if (feed->ts_type & TS_DECODER)
@@ -430,6 +448,11 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
430 } 448 }
431 449
432 if (buf[1] & 0x80) { 450 if (buf[1] & 0x80) {
451 list_for_each_entry(feed, &demux->feed_list, list_head) {
452 if ((feed->pid != pid) && (feed->pid != 0x2000))
453 continue;
454 set_buf_flags(feed, DMX_BUFFER_FLAG_TEI);
455 }
433 dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n", 456 dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n",
434 pid, buf[1]); 457 pid, buf[1]);
435 /* data in this packet can't be trusted - drop it unless 458 /* data in this packet can't be trusted - drop it unless
@@ -445,6 +468,13 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
445 (demux->cnt_storage[pid] + 1) & 0xf; 468 (demux->cnt_storage[pid] + 1) & 0xf;
446 469
447 if ((buf[3] & 0xf) != demux->cnt_storage[pid]) { 470 if ((buf[3] & 0xf) != demux->cnt_storage[pid]) {
471 list_for_each_entry(feed, &demux->feed_list, list_head) {
472 if ((feed->pid != pid) && (feed->pid != 0x2000))
473 continue;
474 set_buf_flags(feed,
475 DMX_BUFFER_PKT_COUNTER_MISMATCH);
476 }
477
448 dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n", 478 dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",
449 pid, demux->cnt_storage[pid], 479 pid, demux->cnt_storage[pid],
450 buf[3] & 0xf); 480 buf[3] & 0xf);
@@ -466,7 +496,8 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
466 if (feed->pid == pid) 496 if (feed->pid == pid)
467 dvb_dmx_swfilter_packet_type(feed, buf); 497 dvb_dmx_swfilter_packet_type(feed, buf);
468 else if (feed->pid == 0x2000) 498 else if (feed->pid == 0x2000)
469 feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); 499 feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts,
500 &feed->buffer_flags);
470 } 501 }
471} 502}
472 503
@@ -585,7 +616,8 @@ void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
585 616
586 spin_lock_irqsave(&demux->lock, flags); 617 spin_lock_irqsave(&demux->lock, flags);
587 618
588 demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts); 619 demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts,
620 &demux->feed->buffer_flags);
589 621
590 spin_unlock_irqrestore(&demux->lock, flags); 622 spin_unlock_irqrestore(&demux->lock, flags);
591} 623}
@@ -785,6 +817,7 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
785 feed->demux = demux; 817 feed->demux = demux;
786 feed->pid = 0xffff; 818 feed->pid = 0xffff;
787 feed->peslen = 0xfffa; 819 feed->peslen = 0xfffa;
820 feed->buffer_flags = 0;
788 821
789 (*ts_feed) = &feed->feed.ts; 822 (*ts_feed) = &feed->feed.ts;
790 (*ts_feed)->parent = dmx; 823 (*ts_feed)->parent = dmx;
@@ -1042,6 +1075,7 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
1042 dvbdmxfeed->cb.sec = callback; 1075 dvbdmxfeed->cb.sec = callback;
1043 dvbdmxfeed->demux = dvbdmx; 1076 dvbdmxfeed->demux = dvbdmx;
1044 dvbdmxfeed->pid = 0xffff; 1077 dvbdmxfeed->pid = 0xffff;
1078 dvbdmxfeed->buffer_flags = 0;
1045 dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base; 1079 dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
1046 dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0; 1080 dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;
1047 dvbdmxfeed->feed.sec.tsfeedp = 0; 1081 dvbdmxfeed->feed.sec.tsfeedp = 0;
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index b6c7eec863b9..ba39f9942e1d 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -883,7 +883,8 @@ static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
883 883
884static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len, 884static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
885 const u8 *buffer2, size_t buffer2_len, 885 const u8 *buffer2, size_t buffer2_len,
886 struct dmx_ts_feed *feed) 886 struct dmx_ts_feed *feed,
887 u32 *buffer_flags)
887{ 888{
888 struct net_device *dev = feed->priv; 889 struct net_device *dev = feed->priv;
889 890
@@ -992,7 +993,7 @@ static void dvb_net_sec(struct net_device *dev,
992 993
993static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len, 994static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
994 const u8 *buffer2, size_t buffer2_len, 995 const u8 *buffer2, size_t buffer2_len,
995 struct dmx_section_filter *filter) 996 struct dmx_section_filter *filter, u32 *buffer_flags)
996{ 997{
997 struct net_device *dev = filter->priv; 998 struct net_device *dev = filter->priv;
998 999
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index 763145d74e83..b811adf88afa 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -256,7 +256,8 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx)
256} 256}
257 257
258int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, 258int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
259 const unsigned char *src, int len) 259 const unsigned char *src, int len,
260 enum dmx_buffer_flags *buffer_flags)
260{ 261{
261 unsigned long flags = 0; 262 unsigned long flags = 0;
262 void *vbuf = NULL; 263 void *vbuf = NULL;
@@ -264,15 +265,17 @@ int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
264 unsigned char *psrc = (unsigned char *)src; 265 unsigned char *psrc = (unsigned char *)src;
265 int ll = 0; 266 int ll = 0;
266 267
267 dprintk(3, "[%s] %d bytes are rcvd\n", ctx->name, len); 268 /*
268 if (!src) { 269 * normal case: This func is called twice from demux driver
269 dprintk(3, "[%s]:NULL pointer src\n", ctx->name); 270 * once with a valid src pointer, second time with NULL pointer
270 /**normal case: This func is called twice from demux driver 271 */
271 * once with valid src pointer, second time with NULL pointer 272 if (!src || !len)
272 */
273 return 0; 273 return 0;
274 }
275 spin_lock_irqsave(&ctx->slock, flags); 274 spin_lock_irqsave(&ctx->slock, flags);
275 if (buffer_flags && *buffer_flags) {
276 ctx->flags |= *buffer_flags;
277 *buffer_flags = 0;
278 }
276 while (todo) { 279 while (todo) {
277 if (!ctx->buf) { 280 if (!ctx->buf) {
278 if (list_empty(&ctx->dvb_q)) { 281 if (list_empty(&ctx->dvb_q)) {
@@ -395,6 +398,7 @@ int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
395 398
396int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) 399int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
397{ 400{
401 unsigned long flags;
398 int ret; 402 int ret;
399 403
400 ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking); 404 ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking);
@@ -402,7 +406,16 @@ int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
402 dprintk(1, "[%s] errno=%d\n", ctx->name, ret); 406 dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
403 return ret; 407 return ret;
404 } 408 }
405 dprintk(5, "[%s] index=%d\n", ctx->name, b->index); 409
410 spin_lock_irqsave(&ctx->slock, flags);
411 b->count = ctx->count++;
412 b->flags = ctx->flags;
413 ctx->flags = 0;
414 spin_unlock_irqrestore(&ctx->slock, flags);
415
416 dprintk(5, "[%s] index=%d, count=%d, flags=%d\n",
417 ctx->name, b->index, ctx->count, b->flags);
418
406 419
407 return 0; 420 return 0;
408} 421}
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 50bce68ffd66..65d157fe76d1 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
1262 * New users must use I2C client binding directly! 1262 * New users must use I2C client binding directly!
1263 */ 1263 */
1264struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, 1264struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
1265 struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter) 1265 struct i2c_adapter *i2c,
1266 struct i2c_adapter **tuner_i2c_adapter)
1266{ 1267{
1267 struct i2c_client *client; 1268 struct i2c_client *client;
1268 struct i2c_board_info board_info; 1269 struct i2c_board_info board_info;
1269 struct m88ds3103_platform_data pdata; 1270 struct m88ds3103_platform_data pdata = {};
1270 1271
1271 pdata.clk = cfg->clock; 1272 pdata.clk = cfg->clock;
1272 pdata.i2c_wr_max = cfg->i2c_wr_max; 1273 pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
1409 case M88DS3103_CHIP_ID: 1410 case M88DS3103_CHIP_ID:
1410 break; 1411 break;
1411 default: 1412 default:
1413 ret = -ENODEV;
1414 dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
1412 goto err_kfree; 1415 goto err_kfree;
1413 } 1416 }
1414 1417
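In m88ds3103_attach(), pdata is now declared with an empty initializer, so members the function never assigns are guaranteed zero instead of stack garbage before the struct is copied into the i2c board_info. A small illustration of the difference (field names hypothetical):

#include <stdio.h>

struct pdata {
	int clk;
	int i2c_wr_max;
	int spare;		/* imagine attach() never sets this one */
};

int main(void)
{
	struct pdata b = {};	/* empty initializer: all members zero
				 * (GNU extension, standardized in C23) */

	b.clk = 27000000;	/* only clk set explicitly */
	printf("b.spare = %d\n", b.spare);	/* well-defined 0 */
	return 0;
}

With a plain uninitialized automatic struct, reading the unset member would be undefined behavior.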
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 3c1851984b90..2476d812f669 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -505,80 +505,77 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
505 /* FIXME: Current api doesn't handle all VBI types, those not 505 /* FIXME: Current api doesn't handle all VBI types, those not
506 yet supported are placed under #if 0 */ 506 yet supported are placed under #if 0 */
507#if 0 507#if 0
508 {0x010, /* Teletext, SECAM, WST System A */ 508 [0] = {0x010, /* Teletext, SECAM, WST System A */
509 {V4L2_SLICED_TELETEXT_SECAM,6,23,1}, 509 {V4L2_SLICED_TELETEXT_SECAM,6,23,1},
510 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26, 510 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26,
511 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 } 511 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 }
512 }, 512 },
513#endif 513#endif
514 {0x030, /* Teletext, PAL, WST System B */ 514 [1] = {0x030, /* Teletext, PAL, WST System B */
515 {V4L2_SLICED_TELETEXT_B,6,22,1}, 515 {V4L2_SLICED_TELETEXT_B,6,22,1},
516 { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b, 516 { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b,
517 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 } 517 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 }
518 }, 518 },
519#if 0 519#if 0
520 {0x050, /* Teletext, PAL, WST System C */ 520 [2] = {0x050, /* Teletext, PAL, WST System C */
521 {V4L2_SLICED_TELETEXT_PAL_C,6,22,1}, 521 {V4L2_SLICED_TELETEXT_PAL_C,6,22,1},
522 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, 522 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
523 0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } 523 0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
524 }, 524 },
525 {0x070, /* Teletext, NTSC, WST System B */ 525 [3] = {0x070, /* Teletext, NTSC, WST System B */
526 {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1}, 526 {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1},
527 { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23, 527 { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23,
528 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } 528 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
529 }, 529 },
530	{0x090, /* Teletext, NTSC NABTS System C */	530	[4] = {0x090, /* Teletext, NTSC NABTS System C */
531 {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1}, 531 {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1},
532 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, 532 { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
533 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 } 533 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 }
534 }, 534 },
535 {0x0b0, /* Teletext, NTSC-J, NABTS System D */ 535 [5] = {0x0b0, /* Teletext, NTSC-J, NABTS System D */
536 {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1}, 536 {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1},
537 { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23, 537 { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23,
538 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } 538 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
539 }, 539 },
540 {0x0d0, /* Closed Caption, PAL/SECAM */ 540 [6] = {0x0d0, /* Closed Caption, PAL/SECAM */
541 {V4L2_SLICED_CAPTION_625,22,22,1}, 541 {V4L2_SLICED_CAPTION_625,22,22,1},
542 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, 542 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
543 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } 543 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
544 }, 544 },
545#endif 545#endif
546 {0x0f0, /* Closed Caption, NTSC */ 546 [7] = {0x0f0, /* Closed Caption, NTSC */
547 {V4L2_SLICED_CAPTION_525,21,21,1}, 547 {V4L2_SLICED_CAPTION_525,21,21,1},
548 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, 548 { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
549 0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } 549 0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
550 }, 550 },
551 {0x110, /* Wide Screen Signal, PAL/SECAM */ 551 [8] = {0x110, /* Wide Screen Signal, PAL/SECAM */
552 {V4L2_SLICED_WSS_625,23,23,1}, 552 {V4L2_SLICED_WSS_625,23,23,1},
553 { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42, 553 { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42,
554 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 } 554 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 }
555 }, 555 },
556#if 0 556#if 0
557 {0x130, /* Wide Screen Signal, NTSC C */ 557 [9] = {0x130, /* Wide Screen Signal, NTSC C */
558 {V4L2_SLICED_WSS_525,20,20,1}, 558 {V4L2_SLICED_WSS_525,20,20,1},
559 { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43, 559 { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43,
560 0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 } 560 0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 }
561 }, 561 },
562 {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ 562 [10] = {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */
563 {V4l2_SLICED_VITC_625,6,22,0}, 563 {V4l2_SLICED_VITC_625,6,22,0},
564 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, 564 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
565 0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } 565 0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
566 }, 566 },
567 {0x170, /* Vertical Interval Timecode (VITC), NTSC */ 567 [11] = {0x170, /* Vertical Interval Timecode (VITC), NTSC */
568 {V4l2_SLICED_VITC_525,10,20,0}, 568 {V4l2_SLICED_VITC_525,10,20,0},
569 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, 569 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
570 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } 570 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
571 }, 571 },
572#endif 572#endif
573 {0x190, /* Video Program System (VPS), PAL */ 573 [12] = {0x190, /* Video Program System (VPS), PAL */
574 {V4L2_SLICED_VPS,16,16,0}, 574 {V4L2_SLICED_VPS,16,16,0},
575 { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d, 575 { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d,
576 0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 } 576 0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 }
577 }, 577 },
578 /* 0x1d0 User programmable */ 578 /* 0x1d0 User programmable */
579
580 /* End of struct */
581 { (u16)-1 }
582}; 579};
583 580
584static int tvp5150_write_inittab(struct v4l2_subdev *sd, 581static int tvp5150_write_inittab(struct v4l2_subdev *sd,
@@ -591,10 +588,10 @@ static int tvp5150_write_inittab(struct v4l2_subdev *sd,
591 return 0; 588 return 0;
592} 589}
593 590
594static int tvp5150_vdp_init(struct v4l2_subdev *sd, 591static int tvp5150_vdp_init(struct v4l2_subdev *sd)
595 const struct i2c_vbi_ram_value *regs)
596{ 592{
597 unsigned int i; 593 unsigned int i;
594 int j;
598 595
599 /* Disable Full Field */ 596 /* Disable Full Field */
600 tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0); 597 tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0);
@@ -604,14 +601,17 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd,
604 tvp5150_write(sd, i, 0xff); 601 tvp5150_write(sd, i, 0xff);
605 602
606 /* Load Ram Table */ 603 /* Load Ram Table */
607 while (regs->reg != (u16)-1) { 604 for (j = 0; j < ARRAY_SIZE(vbi_ram_default); j++) {
605 const struct i2c_vbi_ram_value *regs = &vbi_ram_default[j];
606
607 if (!regs->type.vbi_type)
608 continue;
609
608 tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8); 610 tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8);
609 tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg); 611 tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg);
610 612
611 for (i = 0; i < 16; i++) 613 for (i = 0; i < 16; i++)
612 tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]); 614 tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]);
613
614 regs++;
615 } 615 }
616 return 0; 616 return 0;
617} 617}
@@ -620,19 +620,23 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd,
620static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, 620static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
621 struct v4l2_sliced_vbi_cap *cap) 621 struct v4l2_sliced_vbi_cap *cap)
622{ 622{
623 const struct i2c_vbi_ram_value *regs = vbi_ram_default; 623 int line, i;
624 int line;
625 624
626 dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n"); 625 dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n");
627 memset(cap, 0, sizeof *cap); 626 memset(cap, 0, sizeof *cap);
628 627
629 while (regs->reg != (u16)-1 ) { 628 for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) {
630 for (line=regs->type.ini_line;line<=regs->type.end_line;line++) { 629 const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i];
630
631 if (!regs->type.vbi_type)
632 continue;
633
634 for (line = regs->type.ini_line;
635 line <= regs->type.end_line;
636 line++) {
631 cap->service_lines[0][line] |= regs->type.vbi_type; 637 cap->service_lines[0][line] |= regs->type.vbi_type;
632 } 638 }
633 cap->service_set |= regs->type.vbi_type; 639 cap->service_set |= regs->type.vbi_type;
634
635 regs++;
636 } 640 }
637 return 0; 641 return 0;
638} 642}
@@ -651,14 +655,13 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
651 * MSB = field2 655 * MSB = field2
652 */ 656 */
653static int tvp5150_set_vbi(struct v4l2_subdev *sd, 657static int tvp5150_set_vbi(struct v4l2_subdev *sd,
654 const struct i2c_vbi_ram_value *regs,
655 unsigned int type,u8 flags, int line, 658 unsigned int type,u8 flags, int line,
656 const int fields) 659 const int fields)
657{ 660{
658 struct tvp5150 *decoder = to_tvp5150(sd); 661 struct tvp5150 *decoder = to_tvp5150(sd);
659 v4l2_std_id std = decoder->norm; 662 v4l2_std_id std = decoder->norm;
660 u8 reg; 663 u8 reg;
661 int pos = 0; 664 int i, pos = 0;
662 665
663 if (std == V4L2_STD_ALL) { 666 if (std == V4L2_STD_ALL) {
664 dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); 667 dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
@@ -671,19 +674,19 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
671 if (line < 6 || line > 27) 674 if (line < 6 || line > 27)
672 return 0; 675 return 0;
673 676
674 while (regs->reg != (u16)-1) { 677 for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) {
678 const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i];
679
680 if (!regs->type.vbi_type)
681 continue;
682
675 if ((type & regs->type.vbi_type) && 683 if ((type & regs->type.vbi_type) &&
676 (line >= regs->type.ini_line) && 684 (line >= regs->type.ini_line) &&
677 (line <= regs->type.end_line)) 685 (line <= regs->type.end_line))
678 break; 686 break;
679
680 regs++;
681 pos++; 687 pos++;
682 } 688 }
683 689
684 if (regs->reg == (u16)-1)
685 return 0;
686
687 type = pos | (flags & 0xf0); 690 type = pos | (flags & 0xf0);
688 reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; 691 reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI;
689 692
@@ -696,8 +699,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
696 return type; 699 return type;
697} 700}
698 701
699static int tvp5150_get_vbi(struct v4l2_subdev *sd, 702static int tvp5150_get_vbi(struct v4l2_subdev *sd, int line)
700 const struct i2c_vbi_ram_value *regs, int line)
701{ 703{
702 struct tvp5150 *decoder = to_tvp5150(sd); 704 struct tvp5150 *decoder = to_tvp5150(sd);
703 v4l2_std_id std = decoder->norm; 705 v4l2_std_id std = decoder->norm;
@@ -726,8 +728,8 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
726 return 0; 728 return 0;
727 } 729 }
728 pos = ret & 0x0f; 730 pos = ret & 0x0f;
729 if (pos < 0x0f) 731 if (pos < ARRAY_SIZE(vbi_ram_default))
730 type |= regs[pos].type.vbi_type; 732 type |= vbi_ram_default[pos].type.vbi_type;
731 } 733 }
732 734
733 return type; 735 return type;
@@ -788,7 +790,7 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val)
788 tvp5150_write_inittab(sd, tvp5150_init_default); 790 tvp5150_write_inittab(sd, tvp5150_init_default);
789 791
790 /* Initializes VDP registers */ 792 /* Initializes VDP registers */
791 tvp5150_vdp_init(sd, vbi_ram_default); 793 tvp5150_vdp_init(sd);
792 794
793 /* Selects decoder input */ 795 /* Selects decoder input */
794 tvp5150_selmux(sd); 796 tvp5150_selmux(sd);
@@ -1121,8 +1123,8 @@ static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
1121 for (i = 0; i <= 23; i++) { 1123 for (i = 0; i <= 23; i++) {
1122 svbi->service_lines[1][i] = 0; 1124 svbi->service_lines[1][i] = 0;
1123 svbi->service_lines[0][i] = 1125 svbi->service_lines[0][i] =
1124 tvp5150_set_vbi(sd, vbi_ram_default, 1126 tvp5150_set_vbi(sd, svbi->service_lines[0][i],
1125 svbi->service_lines[0][i], 0xf0, i, 3); 1127 0xf0, i, 3);
1126 } 1128 }
1127 /* Enables FIFO */ 1129 /* Enables FIFO */
1128 tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1); 1130 tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1);
@@ -1148,7 +1150,7 @@ static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
1148 1150
1149 for (i = 0; i <= 23; i++) { 1151 for (i = 0; i <= 23; i++) {
1150 svbi->service_lines[0][i] = 1152 svbi->service_lines[0][i] =
1151 tvp5150_get_vbi(sd, vbi_ram_default, i); 1153 tvp5150_get_vbi(sd, i);
1152 mask |= svbi->service_lines[0][i]; 1154 mask |= svbi->service_lines[0][i];
1153 } 1155 }
1154 svbi->service_set = mask; 1156 svbi->service_set = mask;
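
[Editor's sketch] Across the tvp5150 hunks above, the sentinel-terminated table walk (the old { (u16)-1 } end marker) is replaced by a fixed-size array iterated with ARRAY_SIZE(): designated initializers leave disabled entries zero-filled, and every loop skips them by testing type.vbi_type. A compilable userspace sketch of that table style, with invented names and values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_entry {
	unsigned short reg;
	unsigned int vbi_type;	/* 0 means "slot disabled" */
};

static const struct demo_entry table[] = {
	[1] = { 0x030, 0x0001 },
	[7] = { 0x0f0, 0x1000 },
	[8] = { 0x110, 0x4000 },
	/* slots 0 and 2-6 stay zero-filled, i.e. disabled */
};

int main(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(table); i++) {
		if (!table[i].vbi_type)
			continue;	/* skip holes, no sentinel needed */
		printf("slot %zu: reg=%#x type=%#x\n", i,
		       (unsigned int)table[i].reg, table[i].vbi_type);
	}
	return 0;
}

This also keeps the slot index stable (it maps to the hardware's VBI line-mode position), which is why the bound check later becomes pos < ARRAY_SIZE(vbi_ram_default) instead of a magic 0x0f.
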
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index dc8e577b2f74..d6816effb878 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -324,14 +324,15 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
324 } 324 }
325 return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len, 325 return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len,
326 buffer2, buffer2_len, 326 buffer2, buffer2_len,
327 &dvbdmxfilter->filter); 327 &dvbdmxfilter->filter, NULL);
328 case DMX_TYPE_TS: 328 case DMX_TYPE_TS:
329 if (!(dvbdmxfilter->feed->ts_type & TS_PACKET)) 329 if (!(dvbdmxfilter->feed->ts_type & TS_PACKET))
330 return 0; 330 return 0;
331 if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY) 331 if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY)
332 return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len, 332 return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len,
333 buffer2, buffer2_len, 333 buffer2, buffer2_len,
334 &dvbdmxfilter->feed->feed.ts); 334 &dvbdmxfilter->feed->feed.ts,
335 NULL);
335 else 336 else
336 av7110_p2t_write(buffer1, buffer1_len, 337 av7110_p2t_write(buffer1, buffer1_len,
337 dvbdmxfilter->feed->pid, 338 dvbdmxfilter->feed->pid,
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index 4daba76ec240..ef1bc17cdc4d 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -99,7 +99,7 @@ int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)
99 buf[4] = buf[5] = 0; 99 buf[4] = buf[5] = 0;
100 if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY) 100 if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY)
101 return dvbdmxfeed->cb.ts(buf, len, NULL, 0, 101 return dvbdmxfeed->cb.ts(buf, len, NULL, 0,
102 &dvbdmxfeed->feed.ts); 102 &dvbdmxfeed->feed.ts, NULL);
103 else 103 else
104 return dvb_filter_pes2ts(p2t, buf, len, 1); 104 return dvb_filter_pes2ts(p2t, buf, len, 1);
105} 105}
@@ -109,7 +109,7 @@ static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data)
109 struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv; 109 struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv;
110 110
111 dvbdmxfeed->cb.ts(data, 188, NULL, 0, 111 dvbdmxfeed->cb.ts(data, 188, NULL, 0,
112 &dvbdmxfeed->feed.ts); 112 &dvbdmxfeed->feed.ts, NULL);
113 return 0; 113 return 0;
114} 114}
115 115
@@ -814,7 +814,7 @@ static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter,
814 memcpy(obuf + l, buf + c, TS_SIZE - l); 814 memcpy(obuf + l, buf + c, TS_SIZE - l);
815 c = length; 815 c = length;
816 } 816 }
817 feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts); 817 feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, NULL);
818 pes_start = 0; 818 pes_start = 0;
819 } 819 }
820} 820}
diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig
index 70521e0b4c53..bfaa806633df 100644
--- a/drivers/media/usb/au0828/Kconfig
+++ b/drivers/media/usb/au0828/Kconfig
@@ -1,7 +1,7 @@
1 1
2config VIDEO_AU0828 2config VIDEO_AU0828
3 tristate "Auvitek AU0828 support" 3 tristate "Auvitek AU0828 support"
4 depends on I2C && INPUT && DVB_CORE && USB 4 depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2
5 select I2C_ALGOBIT 5 select I2C_ALGOBIT
6 select VIDEO_TVEEPROM 6 select VIDEO_TVEEPROM
7 select VIDEOBUF2_VMALLOC 7 select VIDEOBUF2_VMALLOC
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index a8900f5571f7..44ca66cb9b8f 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -428,7 +428,7 @@ static int ttusb_dec_audio_pes2ts_cb(void *priv, unsigned char *data)
428 struct ttusb_dec *dec = priv; 428 struct ttusb_dec *dec = priv;
429 429
430 dec->audio_filter->feed->cb.ts(data, 188, NULL, 0, 430 dec->audio_filter->feed->cb.ts(data, 188, NULL, 0,
431 &dec->audio_filter->feed->feed.ts); 431 &dec->audio_filter->feed->feed.ts, NULL);
432 432
433 return 0; 433 return 0;
434} 434}
@@ -438,7 +438,7 @@ static int ttusb_dec_video_pes2ts_cb(void *priv, unsigned char *data)
438 struct ttusb_dec *dec = priv; 438 struct ttusb_dec *dec = priv;
439 439
440 dec->video_filter->feed->cb.ts(data, 188, NULL, 0, 440 dec->video_filter->feed->cb.ts(data, 188, NULL, 0,
441 &dec->video_filter->feed->feed.ts); 441 &dec->video_filter->feed->feed.ts, NULL);
442 442
443 return 0; 443 return 0;
444} 444}
@@ -490,7 +490,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)
490 490
491 if (output_pva) { 491 if (output_pva) {
492 dec->video_filter->feed->cb.ts(pva, length, NULL, 0, 492 dec->video_filter->feed->cb.ts(pva, length, NULL, 0,
493 &dec->video_filter->feed->feed.ts); 493 &dec->video_filter->feed->feed.ts, NULL);
494 return; 494 return;
495 } 495 }
496 496
@@ -551,7 +551,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)
551 case 0x02: /* MainAudioStream */ 551 case 0x02: /* MainAudioStream */
552 if (output_pva) { 552 if (output_pva) {
553 dec->audio_filter->feed->cb.ts(pva, length, NULL, 0, 553 dec->audio_filter->feed->cb.ts(pva, length, NULL, 0,
554 &dec->audio_filter->feed->feed.ts); 554 &dec->audio_filter->feed->feed.ts, NULL);
555 return; 555 return;
556 } 556 }
557 557
@@ -589,7 +589,7 @@ static void ttusb_dec_process_filter(struct ttusb_dec *dec, u8 *packet,
589 589
590 if (filter) 590 if (filter)
591 filter->feed->cb.sec(&packet[2], length - 2, NULL, 0, 591 filter->feed->cb.sec(&packet[2], length - 2, NULL, 0,
592 &filter->filter); 592 &filter->filter, NULL);
593} 593}
594 594
595static void ttusb_dec_process_packet(struct ttusb_dec *dec) 595static void ttusb_dec_process_packet(struct ttusb_dec *dec)
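
[Editor's sketch] The av7110 and ttusb-dec call sites above all adapt to the same interface change: the demux ts/sec callbacks grew a trailing context argument, and every legacy caller passes NULL for it. A hedged sketch of that extend-the-callback-signature pattern; the typedef and names are illustrative, not the dvb_demux API:

#include <stddef.h>
#include <stdio.h>

struct demo_feed { int pid; };

/* new-style callback: one extra opaque pointer at the end */
typedef int (*demo_ts_cb)(const unsigned char *buf, size_t len,
			  struct demo_feed *feed, void *ctx);

static int demo_cb(const unsigned char *buf, size_t len,
		   struct demo_feed *feed, void *ctx)
{
	(void)buf;
	printf("pid=%d len=%zu ctx=%p\n", feed->pid, len, ctx);
	return 0;
}

int main(void)
{
	struct demo_feed feed = { .pid = 0x100 };
	unsigned char pkt[188] = { 0x47 };	/* TS sync byte */
	demo_ts_cb cb = demo_cb;

	return cb(pkt, sizeof(pkt), &feed, NULL);	/* legacy caller */
}
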
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index bf52fbd07aed..8e37e7c5e0f7 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -7,6 +7,7 @@ config VIDEO_V4L2
7 tristate 7 tristate
8 depends on (I2C || I2C=n) && VIDEO_DEV 8 depends on (I2C || I2C=n) && VIDEO_DEV
9 select RATIONAL 9 select RATIONAL
10 select VIDEOBUF2_V4L2 if VIDEOBUF2_CORE
10 default (I2C || I2C=n) && VIDEO_DEV 11 default (I2C || I2C=n) && VIDEO_DEV
11 12
12config VIDEO_ADV_DEBUG 13config VIDEO_ADV_DEBUG
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 80de2cb9c476..7df54582e956 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -13,7 +13,7 @@ ifeq ($(CONFIG_COMPAT),y)
13endif 13endif
14obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o 14obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
15ifeq ($(CONFIG_TRACEPOINTS),y) 15ifeq ($(CONFIG_TRACEPOINTS),y)
16 videodev-objs += vb2-trace.o v4l2-trace.o 16 videodev-objs += v4l2-trace.o
17endif 17endif
18videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o 18videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o
19 19
@@ -35,4 +35,3 @@ obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
35 35
36ccflags-y += -I$(srctree)/drivers/media/dvb-frontends 36ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
37ccflags-y += -I$(srctree)/drivers/media/tuners 37ccflags-y += -I$(srctree)/drivers/media/tuners
38
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 0a7bdbed3a6f..e9c1485c32b9 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -45,8 +45,16 @@
45#define REG_TO_DCPU_MBOX 0x10 45#define REG_TO_DCPU_MBOX 0x10
46#define REG_TO_HOST_MBOX 0x14 46#define REG_TO_HOST_MBOX 0x14
47 47
48/* Macros to process offsets returned by the DCPU */
49#define DRAM_MSG_ADDR_OFFSET 0x0
50#define DRAM_MSG_TYPE_OFFSET 0x1c
51#define DRAM_MSG_ADDR_MASK ((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
52#define DRAM_MSG_TYPE_MASK ((1UL << \
53 (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)
54
48/* Message RAM */ 55/* Message RAM */
49#define DCPU_MSG_RAM(x) (0x100 + (x) * sizeof(u32)) 56#define DCPU_MSG_RAM_START 0x100
57#define DCPU_MSG_RAM(x) (DCPU_MSG_RAM_START + (x) * sizeof(u32))
50 58
51/* DRAM Info Offsets & Masks */ 59/* DRAM Info Offsets & Masks */
52#define DRAM_INFO_INTERVAL 0x0 60#define DRAM_INFO_INTERVAL 0x0
@@ -255,6 +263,40 @@ static unsigned int get_msg_chksum(const u32 msg[])
255 return sum; 263 return sum;
256} 264}
257 265
266static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
267 char *buf, ssize_t *size)
268{
269 unsigned int msg_type;
270 unsigned int offset;
271 void __iomem *ptr = NULL;
272
273 msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
274 offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
275
276 /*
277 * msg_type == 1: the offset is relative to the message RAM
278 * msg_type == 0: the offset is relative to the data RAM (this is the
279 * previous way of passing data)
	280	 * msg_type is anything else: there's a critical hardware problem
281 */
282 switch (msg_type) {
283 case 1:
284 ptr = priv->regs + DCPU_MSG_RAM_START + offset;
285 break;
286 case 0:
287 ptr = priv->dmem + offset;
288 break;
289 default:
290 dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
291 response);
292 if (buf && size)
293 *size = sprintf(buf,
294 "FATAL: communication error with DCPU\n");
295 }
296
297 return ptr;
298}
299
258static int __send_command(struct private_data *priv, unsigned int cmd, 300static int __send_command(struct private_data *priv, unsigned int cmd,
259 u32 result[]) 301 u32 result[])
260{ 302{
@@ -507,7 +549,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
507{ 549{
508 u32 response[MSG_FIELD_MAX]; 550 u32 response[MSG_FIELD_MAX];
509 unsigned int info; 551 unsigned int info;
510 int ret; 552 ssize_t ret;
511 553
512 ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf); 554 ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf);
513 if (ret) 555 if (ret)
@@ -528,18 +570,19 @@ static ssize_t show_refresh(struct device *dev,
528 u32 response[MSG_FIELD_MAX]; 570 u32 response[MSG_FIELD_MAX];
529 void __iomem *info; 571 void __iomem *info;
530 struct private_data *priv; 572 struct private_data *priv;
531 unsigned int offset;
532 u8 refresh, sr_abort, ppre, thermal_offs, tuf; 573 u8 refresh, sr_abort, ppre, thermal_offs, tuf;
533 u32 mr4; 574 u32 mr4;
534 int ret; 575 ssize_t ret;
535 576
536 ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf); 577 ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf);
537 if (ret) 578 if (ret)
538 return ret; 579 return ret;
539 580
540 priv = dev_get_drvdata(dev); 581 priv = dev_get_drvdata(dev);
541 offset = response[MSG_ARG0]; 582
542 info = priv->dmem + offset; 583 info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
584 if (!info)
585 return ret;
543 586
544 mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; 587 mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK;
545 588
@@ -561,7 +604,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
561 u32 response[MSG_FIELD_MAX]; 604 u32 response[MSG_FIELD_MAX];
562 struct private_data *priv; 605 struct private_data *priv;
563 void __iomem *info; 606 void __iomem *info;
564 unsigned int offset;
565 unsigned long val; 607 unsigned long val;
566 int ret; 608 int ret;
567 609
@@ -574,8 +616,10 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
574 if (ret) 616 if (ret)
575 return ret; 617 return ret;
576 618
577 offset = response[MSG_ARG0]; 619 info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
578 info = priv->dmem + offset; 620 if (!info)
621 return -EIO;
622
579 writel_relaxed(val, info + DRAM_INFO_INTERVAL); 623 writel_relaxed(val, info + DRAM_INFO_INTERVAL);
580 624
581 return count; 625 return count;
@@ -587,23 +631,25 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
587 u32 response[MSG_FIELD_MAX]; 631 u32 response[MSG_FIELD_MAX];
588 struct private_data *priv; 632 struct private_data *priv;
589 void __iomem *info; 633 void __iomem *info;
590 unsigned int offset; 634 ssize_t ret;
591 int ret;
592 635
593 ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf); 636 ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf);
594 if (ret) 637 if (ret)
595 return ret; 638 return ret;
596 639
597 offset = response[MSG_ARG0];
598 priv = dev_get_drvdata(dev); 640 priv = dev_get_drvdata(dev);
599 info = priv->dmem + offset; 641
642 info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
643 if (!info)
644 return ret;
600 645
601 return sprintf(buf, "%#x %#x %#x %#x %#x\n", 646 return sprintf(buf, "%#x %#x %#x %#x %#x\n",
602 readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK, 647 readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK,
603 readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK, 648 readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK,
604 readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK, 649 readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK,
605 readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, 650 readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK,
606 readl_relaxed(info + DRAM_VENDOR_ERROR)); 651 readl_relaxed(info + DRAM_VENDOR_ERROR) &
652 DRAM_VENDOR_MASK);
607} 653}
608 654
609static int brcmstb_dpfe_resume(struct platform_device *pdev) 655static int brcmstb_dpfe_resume(struct platform_device *pdev)
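
[Editor's sketch] get_msg_ptr() above treats the DCPU response word as a packed (type, offset) pair: the bits from DRAM_MSG_TYPE_OFFSET upward select message RAM versus data RAM, the low 28 bits carry the offset, and both masks are derived from the single split point so they cannot drift apart. A userspace sketch of the decode, substituting sizeof(unsigned long) * 8 for the kernel's BITS_PER_LONG; the response value is fabricated:

#include <stdio.h>

#define DRAM_MSG_ADDR_OFFSET	0x0
#define DRAM_MSG_TYPE_OFFSET	0x1c
#define DRAM_MSG_ADDR_MASK	((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
#define DRAM_MSG_TYPE_MASK \
	((1UL << (sizeof(unsigned long) * 8 - DRAM_MSG_TYPE_OFFSET)) - 1)

int main(void)
{
	/* fabricated reply: type 1 (message RAM), offset 0x120 */
	unsigned long response = (1UL << DRAM_MSG_TYPE_OFFSET) | 0x120;
	unsigned int msg_type =
		(response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
	unsigned int offset =
		(response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;

	printf("msg_type=%u offset=%#x\n", msg_type, offset);
	return 0;
}
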
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 337462e1569f..038509e5d031 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -102,10 +102,32 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,
102 return rc; 102 return rc;
103} 103}
104 104
105static long afu_ioctl_get_metadata(struct ocxl_context *ctx,
106 struct ocxl_ioctl_metadata __user *uarg)
107{
108 struct ocxl_ioctl_metadata arg;
109
110 memset(&arg, 0, sizeof(arg));
111
112 arg.version = 0;
113
114 arg.afu_version_major = ctx->afu->config.version_major;
115 arg.afu_version_minor = ctx->afu->config.version_minor;
116 arg.pasid = ctx->pasid;
117 arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride;
118 arg.global_mmio_size = ctx->afu->config.global_mmio_size;
119
120 if (copy_to_user(uarg, &arg, sizeof(arg)))
121 return -EFAULT;
122
123 return 0;
124}
125
105#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ 126#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \
106 x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ 127 x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \
107 x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ 128 x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \
108 x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ 129 x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \
130 x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \
109 "UNKNOWN") 131 "UNKNOWN")
110 132
111static long afu_ioctl(struct file *file, unsigned int cmd, 133static long afu_ioctl(struct file *file, unsigned int cmd,
@@ -159,6 +181,11 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
159 irq_fd.eventfd); 181 irq_fd.eventfd);
160 break; 182 break;
161 183
184 case OCXL_IOCTL_GET_METADATA:
185 rc = afu_ioctl_get_metadata(ctx,
186 (struct ocxl_ioctl_metadata __user *) args);
187 break;
188
162 default: 189 default:
163 rc = -EINVAL; 190 rc = -EINVAL;
164 } 191 }
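
[Editor's sketch] afu_ioctl_get_metadata() above follows the usual forward-compatible ioctl reply convention: zero the whole struct first (so padding and unset members cannot leak kernel stack to userspace), stamp a version field, and leave room to append members later. A sketch of that convention with an invented layout, not the real ocxl_ioctl_metadata ABI:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_metadata {
	uint16_t version;	/* bumped whenever fields are appended */
	uint16_t afu_version_major;
	uint16_t afu_version_minor;
	uint32_t pasid;
	uint64_t pp_mmio_size;
	uint64_t reserved[4];	/* room for future versions */
};

int main(void)
{
	struct demo_metadata arg;

	memset(&arg, 0, sizeof(arg));	/* no stale bytes reach userspace */
	arg.version = 0;
	arg.afu_version_major = 1;
	arg.pasid = 42;
	printf("v%u afu=%u.%u pasid=%u\n", arg.version,
	       arg.afu_version_major, arg.afu_version_minor, arg.pasid);
	return 0;
}
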
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 908e4db03535..42d6aa89a48a 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -848,7 +848,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
848 return 1; 848 return 1;
849 } 849 }
850 850
851 mmc_claim_host(card->host);
852 err = mmc_send_status(card, &status); 851 err = mmc_send_status(card, &status);
853 if (err) { 852 if (err) {
854 pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); 853 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
@@ -890,7 +889,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
890 } while (!err); 889 } while (!err);
891 890
892out: 891out:
893 mmc_release_host(card->host);
894 return err; 892 return err;
895} 893}
896 894
@@ -932,9 +930,7 @@ static int mmc_read_bkops_status(struct mmc_card *card)
932 int err; 930 int err;
933 u8 *ext_csd; 931 u8 *ext_csd;
934 932
935 mmc_claim_host(card->host);
936 err = mmc_get_ext_csd(card, &ext_csd); 933 err = mmc_get_ext_csd(card, &ext_csd);
937 mmc_release_host(card->host);
938 if (err) 934 if (err)
939 return err; 935 return err;
940 936
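
[Editor's sketch] The mmc_ops hunks above drop the claim/release pairs from these helpers because their callers now hold the host when the helpers run; re-claiming inside would deadlock or demand recursive-lock tricks, so ownership moves to the outermost caller. A tiny pthread sketch of that caller-holds-the-lock discipline, with invented names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

/* Helper assumes the caller already holds host_lock, mirroring how
 * mmc_send_status() is now reached with the host already claimed. */
static int send_status_locked(unsigned int *status)
{
	*status = 0x900;	/* pretend card state: TRAN */
	return 0;
}

int main(void)
{
	unsigned int status;

	pthread_mutex_lock(&host_lock);	/* outermost caller claims once */
	send_status_locked(&status);
	pthread_mutex_unlock(&host_lock);
	printf("status=%#x\n", status);
	return 0;
}
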
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 35026795be28..fa41d9422d57 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = {
487 487
488static const struct dw_mci_drv_data exynos_drv_data = { 488static const struct dw_mci_drv_data exynos_drv_data = {
489 .caps = exynos_dwmmc_caps, 489 .caps = exynos_dwmmc_caps,
490 .num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
490 .init = dw_mci_exynos_priv_init, 491 .init = dw_mci_exynos_priv_init,
491 .set_ios = dw_mci_exynos_set_ios, 492 .set_ios = dw_mci_exynos_set_ios,
492 .parse_dt = dw_mci_exynos_parse_dt, 493 .parse_dt = dw_mci_exynos_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 73fd75c3c824..89cdb3d533bb 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
135 if (priv->ctrl_id < 0) 135 if (priv->ctrl_id < 0)
136 priv->ctrl_id = 0; 136 priv->ctrl_id = 0;
137 137
138 if (priv->ctrl_id >= TIMING_MODE)
139 return -EINVAL;
140
138 host->priv = priv; 141 host->priv = priv;
139 return 0; 142 return 0;
140} 143}
@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
207 210
208static const struct dw_mci_drv_data hi6220_data = { 211static const struct dw_mci_drv_data hi6220_data = {
209 .caps = dw_mci_hi6220_caps, 212 .caps = dw_mci_hi6220_caps,
213 .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps),
210 .switch_voltage = dw_mci_hi6220_switch_voltage, 214 .switch_voltage = dw_mci_hi6220_switch_voltage,
211 .set_ios = dw_mci_hi6220_set_ios, 215 .set_ios = dw_mci_hi6220_set_ios,
212 .parse_dt = dw_mci_hi6220_parse_dt, 216 .parse_dt = dw_mci_hi6220_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index a3f1c2b30145..339295212935 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {
319 319
320static const struct dw_mci_drv_data rk3288_drv_data = { 320static const struct dw_mci_drv_data rk3288_drv_data = {
321 .caps = dw_mci_rk3288_dwmmc_caps, 321 .caps = dw_mci_rk3288_dwmmc_caps,
322 .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
322 .set_ios = dw_mci_rk3288_set_ios, 323 .set_ios = dw_mci_rk3288_set_ios,
323 .execute_tuning = dw_mci_rk3288_execute_tuning, 324 .execute_tuning = dw_mci_rk3288_execute_tuning,
324 .parse_dt = dw_mci_rk3288_parse_dt, 325 .parse_dt = dw_mci_rk3288_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c
index d38e94ae2b85..c06b5393312f 100644
--- a/drivers/mmc/host/dw_mmc-zx.c
+++ b/drivers/mmc/host/dw_mmc-zx.c
@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {
195 195
196static const struct dw_mci_drv_data zx_drv_data = { 196static const struct dw_mci_drv_data zx_drv_data = {
197 .caps = zx_dwmmc_caps, 197 .caps = zx_dwmmc_caps,
198 .num_caps = ARRAY_SIZE(zx_dwmmc_caps),
198 .execute_tuning = dw_mci_zx_execute_tuning, 199 .execute_tuning = dw_mci_zx_execute_tuning,
199 .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, 200 .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning,
200 .parse_dt = dw_mci_zx_parse_dt, 201 .parse_dt = dw_mci_zx_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 0aa39975f33b..d9b4acefed31 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
165{ 165{
166 struct dw_mci *host = s->private; 166 struct dw_mci *host = s->private;
167 167
168 pm_runtime_get_sync(host->dev);
169
168 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); 170 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
169 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); 171 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
170 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); 172 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
172 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); 174 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
173 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); 175 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
174 176
177 pm_runtime_put_autosuspend(host->dev);
178
175 return 0; 179 return 0;
176} 180}
177 181
@@ -2778,12 +2782,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2778 return IRQ_HANDLED; 2782 return IRQ_HANDLED;
2779} 2783}
2780 2784
2785static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2786{
2787 struct dw_mci *host = slot->host;
2788 const struct dw_mci_drv_data *drv_data = host->drv_data;
2789 struct mmc_host *mmc = slot->mmc;
2790 int ctrl_id;
2791
2792 if (host->pdata->caps)
2793 mmc->caps = host->pdata->caps;
2794
2795 /*
2796 * Support MMC_CAP_ERASE by default.
2797 * It needs to use trim/discard/erase commands.
2798 */
2799 mmc->caps |= MMC_CAP_ERASE;
2800
2801 if (host->pdata->pm_caps)
2802 mmc->pm_caps = host->pdata->pm_caps;
2803
2804 if (host->dev->of_node) {
2805 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2806 if (ctrl_id < 0)
2807 ctrl_id = 0;
2808 } else {
2809 ctrl_id = to_platform_device(host->dev)->id;
2810 }
2811
2812 if (drv_data && drv_data->caps) {
2813 if (ctrl_id >= drv_data->num_caps) {
2814 dev_err(host->dev, "invalid controller id %d\n",
2815 ctrl_id);
2816 return -EINVAL;
2817 }
2818 mmc->caps |= drv_data->caps[ctrl_id];
2819 }
2820
2821 if (host->pdata->caps2)
2822 mmc->caps2 = host->pdata->caps2;
2823
2824 /* Process SDIO IRQs through the sdio_irq_work. */
2825 if (mmc->caps & MMC_CAP_SDIO_IRQ)
2826 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2827
2828 return 0;
2829}
2830
2781static int dw_mci_init_slot(struct dw_mci *host) 2831static int dw_mci_init_slot(struct dw_mci *host)
2782{ 2832{
2783 struct mmc_host *mmc; 2833 struct mmc_host *mmc;
2784 struct dw_mci_slot *slot; 2834 struct dw_mci_slot *slot;
2785 const struct dw_mci_drv_data *drv_data = host->drv_data; 2835 int ret;
2786 int ctrl_id, ret;
2787 u32 freq[2]; 2836 u32 freq[2];
2788 2837
2789 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2838 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
@@ -2817,38 +2866,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
2817 if (!mmc->ocr_avail) 2866 if (!mmc->ocr_avail)
2818 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2867 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2819 2868
2820 if (host->pdata->caps)
2821 mmc->caps = host->pdata->caps;
2822
2823 /*
2824 * Support MMC_CAP_ERASE by default.
2825 * It needs to use trim/discard/erase commands.
2826 */
2827 mmc->caps |= MMC_CAP_ERASE;
2828
2829 if (host->pdata->pm_caps)
2830 mmc->pm_caps = host->pdata->pm_caps;
2831
2832 if (host->dev->of_node) {
2833 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2834 if (ctrl_id < 0)
2835 ctrl_id = 0;
2836 } else {
2837 ctrl_id = to_platform_device(host->dev)->id;
2838 }
2839 if (drv_data && drv_data->caps)
2840 mmc->caps |= drv_data->caps[ctrl_id];
2841
2842 if (host->pdata->caps2)
2843 mmc->caps2 = host->pdata->caps2;
2844
2845 ret = mmc_of_parse(mmc); 2869 ret = mmc_of_parse(mmc);
2846 if (ret) 2870 if (ret)
2847 goto err_host_allocated; 2871 goto err_host_allocated;
2848 2872
2849 /* Process SDIO IRQs through the sdio_irq_work. */ 2873 ret = dw_mci_init_slot_caps(slot);
2850 if (mmc->caps & MMC_CAP_SDIO_IRQ) 2874 if (ret)
2851 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2875 goto err_host_allocated;
2852 2876
2853 /* Useful defaults if platform data is unset. */ 2877 /* Useful defaults if platform data is unset. */
2854 if (host->use_dma == TRANS_MODE_IDMAC) { 2878 if (host->use_dma == TRANS_MODE_IDMAC) {
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index e3124f06a47e..1424bd490dd1 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -543,6 +543,7 @@ struct dw_mci_slot {
543/** 543/**
544 * dw_mci driver data - dw-mshc implementation specific driver data. 544 * dw_mci driver data - dw-mshc implementation specific driver data.
545 * @caps: mmc subsystem specified capabilities of the controller(s). 545 * @caps: mmc subsystem specified capabilities of the controller(s).
546 * @num_caps: number of capabilities specified by @caps.
546 * @init: early implementation specific initialization. 547 * @init: early implementation specific initialization.
547 * @set_ios: handle bus specific extensions. 548 * @set_ios: handle bus specific extensions.
548 * @parse_dt: parse implementation specific device tree properties. 549 * @parse_dt: parse implementation specific device tree properties.
@@ -554,6 +555,7 @@ struct dw_mci_slot {
554 */ 555 */
555struct dw_mci_drv_data { 556struct dw_mci_drv_data {
556 unsigned long *caps; 557 unsigned long *caps;
558 u32 num_caps;
557 int (*init)(struct dw_mci *host); 559 int (*init)(struct dw_mci *host);
558 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); 560 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
559 int (*parse_dt)(struct dw_mci *host); 561 int (*parse_dt)(struct dw_mci *host);
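
[Editor's sketch] Taken together, the dw_mmc hunks above pair each caps array with a num_caps length (set via ARRAY_SIZE at the definition site) so dw_mci_init_slot_caps() can reject an out-of-range ctrl_id instead of indexing past the array. A compilable sketch of that array-plus-length validation, with invented values:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_drv_data {
	const unsigned long *caps;
	unsigned int num_caps;
};

static const unsigned long demo_caps[4] = { 0x1, 0x2, 0x4, 0x8 };
static const struct demo_drv_data demo = {
	.caps = demo_caps,
	.num_caps = ARRAY_SIZE(demo_caps),
};

static int lookup_caps(int ctrl_id, unsigned long *out)
{
	if (ctrl_id < 0 || (unsigned int)ctrl_id >= demo.num_caps)
		return -1;	/* would have been an OOB read before */
	*out = demo.caps[ctrl_id];
	return 0;
}

int main(void)
{
	unsigned long caps;

	if (lookup_caps(5, &caps))
		printf("invalid controller id 5 rejected\n");
	if (!lookup_caps(2, &caps))
		printf("ctrl 2 caps=%#lx\n", caps);
	return 0;
}

The dw_mmc-k3 hunk adds the equivalent guard (ctrl_id >= TIMING_MODE) at parse time, catching a bad DT alias before the lookup ever happens.
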
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 6d1a983e6227..82c4f05f91d8 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -654,9 +654,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)
654 slot->chip->rpm_retune = intel_host->d3_retune; 654 slot->chip->rpm_retune = intel_host->d3_retune;
655} 655}
656 656
657static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) 657static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
658{
659 int err = sdhci_execute_tuning(mmc, opcode);
660 struct sdhci_host *host = mmc_priv(mmc);
661
662 if (err)
663 return err;
664
665 /*
666 * Tuning can leave the IP in an active state (Buffer Read Enable bit
	667	 * set) which prevents entry into low power states (i.e. S0i3). Data
668 * reset will clear it.
669 */
670 sdhci_reset(host, SDHCI_RESET_DATA);
671
672 return 0;
673}
674
675static void byt_probe_slot(struct sdhci_pci_slot *slot)
658{ 676{
677 struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
678
659 byt_read_dsm(slot); 679 byt_read_dsm(slot);
680
681 ops->execute_tuning = intel_execute_tuning;
682}
683
684static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
685{
686 byt_probe_slot(slot);
660 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 687 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
661 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | 688 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
662 MMC_CAP_CMD_DURING_TFR | 689 MMC_CAP_CMD_DURING_TFR |
@@ -779,7 +806,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
779{ 806{
780 int err; 807 int err;
781 808
782 byt_read_dsm(slot); 809 byt_probe_slot(slot);
783 810
784 err = ni_set_max_freq(slot); 811 err = ni_set_max_freq(slot);
785 if (err) 812 if (err)
@@ -792,7 +819,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
792 819
793static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) 820static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
794{ 821{
795 byt_read_dsm(slot); 822 byt_probe_slot(slot);
796 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | 823 slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
797 MMC_CAP_WAIT_WHILE_BUSY; 824 MMC_CAP_WAIT_WHILE_BUSY;
798 return 0; 825 return 0;
@@ -800,7 +827,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
800 827
801static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) 828static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
802{ 829{
803 byt_read_dsm(slot); 830 byt_probe_slot(slot);
804 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | 831 slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
805 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; 832 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
806 slot->cd_idx = 0; 833 slot->cd_idx = 0;
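
[Editor's sketch] intel_execute_tuning() above is the wrap-the-generic-op pattern: call the default sdhci_execute_tuning(), and only on success chain the platform-specific data reset that clears the stuck Buffer Read Enable state. A minimal sketch of wrapping an ops callback this way; the function names are stand-ins:

#include <stdio.h>

static int generic_tuning(int opcode)
{
	printf("generic tuning, opcode=%d\n", opcode);
	return 0;
}

static int intel_tuning(int opcode)
{
	int err = generic_tuning(opcode);

	if (err)
		return err;
	/* platform fixup chained after the generic step succeeds */
	printf("data reset to clear Buffer Read Enable\n");
	return 0;
}

/* consumers call through the ops pointer, unaware of the wrap */
static int (*execute_tuning)(int opcode) = intel_tuning;

int main(void)
{
	return execute_tuning(19);	/* 19: MMC_SEND_TUNING_BLOCK */
}

Hoisting byt_read_dsm() plus the ops override into byt_probe_slot() then lets every Bay Trail slot variant pick up the wrapper from one place.
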
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f5c87bd35fa1..f27f9bae1a4a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3063,9 +3063,6 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
3063 if (ndev->features & NETIF_F_RXCSUM) 3063 if (ndev->features & NETIF_F_RXCSUM)
3064 gfar_rx_checksum(skb, fcb); 3064 gfar_rx_checksum(skb, fcb);
3065 3065
3066 /* Tell the skb what kind of packet this is */
3067 skb->protocol = eth_type_trans(skb, ndev);
3068
3069 /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. 3066 /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
3070 * Even if vlan rx accel is disabled, on some chips 3067 * Even if vlan rx accel is disabled, on some chips
3071 * RXFCB_VLN is pseudo randomly set. 3068 * RXFCB_VLN is pseudo randomly set.
@@ -3136,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
3136 continue; 3133 continue;
3137 } 3134 }
3138 3135
3136 gfar_process_frame(ndev, skb);
3137
3139 /* Increment the number of packets */ 3138 /* Increment the number of packets */
3140 total_pkts++; 3139 total_pkts++;
3141 total_bytes += skb->len; 3140 total_bytes += skb->len;
3142 3141
3143 skb_record_rx_queue(skb, rx_queue->qindex); 3142 skb_record_rx_queue(skb, rx_queue->qindex);
3144 3143
3145 gfar_process_frame(ndev, skb); 3144 skb->protocol = eth_type_trans(skb, ndev);
3146 3145
3147 /* Send the packet up the stack */ 3146 /* Send the packet up the stack */
3148 napi_gro_receive(&rx_queue->grp->napi_rx, skb); 3147 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0da5aa2c8aba..9fc063af233c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
1888 ixgbe_rx_pg_size(rx_ring), 1888 ixgbe_rx_pg_size(rx_ring),
1889 DMA_FROM_DEVICE, 1889 DMA_FROM_DEVICE,
1890 IXGBE_RX_DMA_ATTR); 1890 IXGBE_RX_DMA_ATTR);
1891 } else if (ring_uses_build_skb(rx_ring)) {
1892 unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
1893
1894 dma_sync_single_range_for_cpu(rx_ring->dev,
1895 IXGBE_CB(skb)->dma,
1896 offset,
1897 skb_headlen(skb),
1898 DMA_FROM_DEVICE);
1891 } else { 1899 } else {
1892 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 1900 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1893 1901
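
[Editor's sketch] The ixgbe hunk above syncs only the region the CPU will actually read for a build_skb buffer: the offset of skb->data within its page, for skb_headlen() bytes. A sketch of just the offset arithmetic, with a fabricated address standing in for skb->data:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uintptr_t data = 0x7f3a12345680;	/* pretend skb->data */
	unsigned long offset = data & ~PAGE_MASK;	/* offset in page */
	unsigned long headlen = 256;		/* pretend skb_headlen() */

	printf("sync range: page offset %#lx, len %lu\n", offset, headlen);
	return 0;
}
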
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 21d29f7936f6..d39b0b7011b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
124 trigger_cmd_completions(dev); 124 trigger_cmd_completions(dev);
125 } 125 }
126 126
127 mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); 127 mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
128 mlx5_core_err(dev, "end\n"); 128 mlx5_core_err(dev, "end\n");
129 129
130unlock: 130unlock:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index f6963b0b4a55..122506daa586 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
107 MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), 107 MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12),
108 MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), 108 MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3),
109 MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), 109 MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9),
110 MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8),
111 MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2),
112 MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6),
113 MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
114 MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
115 MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
116 MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
117 MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
118 MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
119 MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), 110 MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
120 MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), 111 MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
112 MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
113 MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
114 MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
115 MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32),
116 MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32),
117 MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8),
118 MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8),
119 MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8),
120 MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8),
121}; 121};
122 122
123#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 123#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
124 124
125struct mlxsw_afk_element_inst { /* element instance in actual block */ 125struct mlxsw_afk_element_inst { /* element instance in actual block */
126 const struct mlxsw_afk_element_info *info; 126 const struct mlxsw_afk_element_info *info;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3dcc58d61506..c7e941aecc2a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1459,6 +1459,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1459 } 1459 }
1460 1460
1461 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; 1461 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1462 mlxsw_sp_port_vlan->ref_count = 1;
1462 mlxsw_sp_port_vlan->vid = vid; 1463 mlxsw_sp_port_vlan->vid = vid;
1463 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); 1464 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1464 1465
@@ -1486,8 +1487,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1486 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1487 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1487 1488
1488 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1489 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1489 if (mlxsw_sp_port_vlan) 1490 if (mlxsw_sp_port_vlan) {
1491 mlxsw_sp_port_vlan->ref_count++;
1490 return mlxsw_sp_port_vlan; 1492 return mlxsw_sp_port_vlan;
1493 }
1491 1494
1492 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); 1495 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1493} 1496}
@@ -1496,6 +1499,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1496{ 1499{
1497 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; 1500 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1498 1501
1502 if (--mlxsw_sp_port_vlan->ref_count != 0)
1503 return;
1504
1499 if (mlxsw_sp_port_vlan->bridge_port) 1505 if (mlxsw_sp_port_vlan->bridge_port)
1500 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); 1506 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1501 else if (fid) 1507 else if (fid)
@@ -4207,13 +4213,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
4207 .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate, 4213 .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
4208}; 4214};
4209 4215
4210static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
4211static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
4212static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
4213static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
4214
4215static void 4216static void
4216mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) 4217mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
4218 struct devlink_resource_size_params *kvd_size_params,
4219 struct devlink_resource_size_params *linear_size_params,
4220 struct devlink_resource_size_params *hash_double_size_params,
4221 struct devlink_resource_size_params *hash_single_size_params)
4217{ 4222{
4218 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, 4223 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
4219 KVD_SINGLE_MIN_SIZE); 4224 KVD_SINGLE_MIN_SIZE);
@@ -4222,37 +4227,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
4222 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 4227 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
4223 u32 linear_size_min = 0; 4228 u32 linear_size_min = 0;
4224 4229
4225 /* KVD top resource */ 4230 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
4226 mlxsw_sp_kvd_size_params.size_min = kvd_size; 4231 MLXSW_SP_KVD_GRANULARITY,
4227 mlxsw_sp_kvd_size_params.size_max = kvd_size; 4232 DEVLINK_RESOURCE_UNIT_ENTRY);
4228 mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; 4233 devlink_resource_size_params_init(linear_size_params, linear_size_min,
4229 mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; 4234 kvd_size - single_size_min -
4230 4235 double_size_min,
4231 /* Linear part init */ 4236 MLXSW_SP_KVD_GRANULARITY,
4232 mlxsw_sp_linear_size_params.size_min = linear_size_min; 4237 DEVLINK_RESOURCE_UNIT_ENTRY);
4233 mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min - 4238 devlink_resource_size_params_init(hash_double_size_params,
4234 double_size_min; 4239 double_size_min,
4235 mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; 4240 kvd_size - single_size_min -
4236 mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; 4241 linear_size_min,
4237 4242 MLXSW_SP_KVD_GRANULARITY,
4238 /* Hash double part init */ 4243 DEVLINK_RESOURCE_UNIT_ENTRY);
4239 mlxsw_sp_hash_double_size_params.size_min = double_size_min; 4244 devlink_resource_size_params_init(hash_single_size_params,
4240 mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min - 4245 single_size_min,
4241 linear_size_min; 4246 kvd_size - double_size_min -
4242 mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; 4247 linear_size_min,
4243 mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; 4248 MLXSW_SP_KVD_GRANULARITY,
4244 4249 DEVLINK_RESOURCE_UNIT_ENTRY);
4245 /* Hash single part init */
4246 mlxsw_sp_hash_single_size_params.size_min = single_size_min;
4247 mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
4248 linear_size_min;
4249 mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
4250 mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
4251} 4250}
4252 4251
4253static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) 4252static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
4254{ 4253{
4255 struct devlink *devlink = priv_to_devlink(mlxsw_core); 4254 struct devlink *devlink = priv_to_devlink(mlxsw_core);
4255 struct devlink_resource_size_params hash_single_size_params;
4256 struct devlink_resource_size_params hash_double_size_params;
4257 struct devlink_resource_size_params linear_size_params;
4258 struct devlink_resource_size_params kvd_size_params;
4256 u32 kvd_size, single_size, double_size, linear_size; 4259 u32 kvd_size, single_size, double_size, linear_size;
4257 const struct mlxsw_config_profile *profile; 4260 const struct mlxsw_config_profile *profile;
4258 int err; 4261 int err;
@@ -4261,13 +4264,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
4261 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) 4264 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
4262 return -EIO; 4265 return -EIO;
4263 4266
4264 mlxsw_sp_resource_size_params_prepare(mlxsw_core); 4267 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
4268 &linear_size_params,
4269 &hash_double_size_params,
4270 &hash_single_size_params);
4271
4265 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); 4272 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
4266 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, 4273 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
4267 true, kvd_size, 4274 true, kvd_size,
4268 MLXSW_SP_RESOURCE_KVD, 4275 MLXSW_SP_RESOURCE_KVD,
4269 DEVLINK_RESOURCE_ID_PARENT_TOP, 4276 DEVLINK_RESOURCE_ID_PARENT_TOP,
4270 &mlxsw_sp_kvd_size_params, 4277 &kvd_size_params,
4271 &mlxsw_sp_resource_kvd_ops); 4278 &mlxsw_sp_resource_kvd_ops);
4272 if (err) 4279 if (err)
4273 return err; 4280 return err;
@@ -4277,7 +4284,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
4277 false, linear_size, 4284 false, linear_size,
4278 MLXSW_SP_RESOURCE_KVD_LINEAR, 4285 MLXSW_SP_RESOURCE_KVD_LINEAR,
4279 MLXSW_SP_RESOURCE_KVD, 4286 MLXSW_SP_RESOURCE_KVD,
4280 &mlxsw_sp_linear_size_params, 4287 &linear_size_params,
4281 &mlxsw_sp_resource_kvd_linear_ops); 4288 &mlxsw_sp_resource_kvd_linear_ops);
4282 if (err) 4289 if (err)
4283 return err; 4290 return err;
@@ -4291,7 +4298,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
4291 false, double_size, 4298 false, double_size,
4292 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, 4299 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
4293 MLXSW_SP_RESOURCE_KVD, 4300 MLXSW_SP_RESOURCE_KVD,
4294 &mlxsw_sp_hash_double_size_params, 4301 &hash_double_size_params,
4295 &mlxsw_sp_resource_kvd_hash_double_ops); 4302 &mlxsw_sp_resource_kvd_hash_double_ops);
4296 if (err) 4303 if (err)
4297 return err; 4304 return err;
@@ -4301,7 +4308,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
4301 false, single_size, 4308 false, single_size,
4302 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, 4309 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
4303 MLXSW_SP_RESOURCE_KVD, 4310 MLXSW_SP_RESOURCE_KVD,
4304 &mlxsw_sp_hash_single_size_params, 4311 &hash_single_size_params,
4305 &mlxsw_sp_resource_kvd_hash_single_ops); 4312 &mlxsw_sp_resource_kvd_hash_single_ops);
4306 if (err) 4313 if (err)
4307 return err; 4314 return err;
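
[Editor's sketch] The mlxsw_sp refactor above moves the four devlink_resource_size_params structs from file-scope statics onto the registering function's stack and fills them through devlink_resource_size_params_init(), so concurrent registrations of multiple devices no longer share mutable globals. A sketch of the statics-to-stack-plus-init-helper shape, with stand-in types:

#include <stdio.h>

struct size_params {
	unsigned int size_min, size_max, granularity;
};

static void size_params_init(struct size_params *p, unsigned int min,
			     unsigned int max, unsigned int gran)
{
	p->size_min = min;
	p->size_max = max;
	p->granularity = gran;
}

static void resources_register(unsigned int kvd_size)
{
	struct size_params kvd;	/* per-call, not a shared static */

	size_params_init(&kvd, kvd_size, kvd_size, 128);
	printf("register kvd: min=%u max=%u gran=%u\n",
	       kvd.size_min, kvd.size_max, kvd.granularity);
}

int main(void)
{
	resources_register(200000);
	return 0;
}
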
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bdd8f94a452c..4ec1ca3c96c8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -211,6 +211,7 @@ struct mlxsw_sp_port_vlan {
211 struct list_head list; 211 struct list_head list;
212 struct mlxsw_sp_port *mlxsw_sp_port; 212 struct mlxsw_sp_port *mlxsw_sp_port;
213 struct mlxsw_sp_fid *fid; 213 struct mlxsw_sp_fid *fid;
214 unsigned int ref_count;
214 u16 vid; 215 u16 vid;
215 struct mlxsw_sp_bridge_port *bridge_port; 216 struct mlxsw_sp_bridge_port *bridge_port;
216 struct list_head bridge_vlan_node; 217 struct list_head bridge_vlan_node;
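
[Editor's sketch] The new ref_count member above turns mlxsw_sp_port_vlan_get()/_put() into a classic get/put pair: get returns an existing object with the count bumped (or creates one holding the first reference), and put destroys only when the last reference drops. A single-threaded userspace sketch of that discipline, with invented demo_ names:

#include <stdio.h>
#include <stdlib.h>

struct demo_vlan {
	unsigned int ref_count;
	unsigned short vid;
};

static struct demo_vlan *vlan_get(struct demo_vlan **slot, unsigned short vid)
{
	if (*slot) {
		(*slot)->ref_count++;	/* existing user: just bump */
		return *slot;
	}
	*slot = calloc(1, sizeof(**slot));
	if (!*slot)
		return NULL;
	(*slot)->ref_count = 1;		/* creation takes the first ref */
	(*slot)->vid = vid;
	return *slot;
}

static void vlan_put(struct demo_vlan **slot)
{
	if (--(*slot)->ref_count != 0)
		return;			/* another holder remains */
	free(*slot);
	*slot = NULL;
}

int main(void)
{
	struct demo_vlan *slot = NULL;

	vlan_get(&slot, 1);
	vlan_get(&slot, 1);		/* second user of the same vid */
	vlan_put(&slot);
	printf("after one put: refs=%u\n", slot->ref_count);
	vlan_put(&slot);		/* last reference frees it */
	printf("after last put: %s\n", slot ? "alive" : "freed");
	return 0;
}
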
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index bbd238e50f05..54262af4e98f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
112 [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, 112 [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1,
113 [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, 113 [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1,
114 [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, 114 [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1,
115 [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
115}; 116};
116 117
117static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { 118static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
118 [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, 119 [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1,
119 [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1,
120}; 120};
121 121
122static const int *mlxsw_sp_packet_type_sfgc_types[] = { 122static const int *mlxsw_sp_packet_type_sfgc_types[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 593ad31be749..161bcdc012f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				     bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 			      mac, fid, action, local_port);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
 
+out:
+	kfree(sfd_pl);
 	return err;
 }
 
@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 				       bool adding, bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
 				  lag_vid, lag_id);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
 
+out:
+	kfree(sfd_pl);
 	return err;
 }
 
@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 				u16 fid, u16 mid_idx, bool adding)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
+
+out:
 	kfree(sfd_pl);
 	return err;
 }
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 92dcf8717fc6..14c839bb09e7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -439,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
 			     enum_index);
 }
 
+static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+			     int enum_index)
+{
+	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
+static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
+{
+	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+}
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a6753ccba711..e5fe70134690 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -567,15 +567,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
 	return mdp->tsu_addr + mdp->reg_offset[enum_index];
 }
 
-static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
-				    int enum_index)
-{
-	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
-static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
-{
-	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
-}
-
 #endif /* #ifndef __SH_ETH_H__ */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 17e529af79dc..0265d703eb03 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev,
 	if (unlikely(!net_device || net_device->destroy))
 		return -ENODEV;
 
-	/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
-	 * here before the negotiation with the host is finished and
-	 * send_section_map may not be allocated yet.
-	 */
-	if (unlikely(!net_device->send_section_map))
-		return -EAGAIN;
-
 	nvchan = &net_device->chan_table[packet->q_idx];
 	packet->send_buf_index = NETVSC_INVALID_INDEX;
 	packet->cp_partial = false;
@@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev,
 	/* Send control message directly without accessing msd (Multi-Send
 	 * Data) field which may be changed during data packet processing.
 	 */
-	if (!skb) {
-		cur_send = packet;
-		goto send_now;
-	}
+	if (!skb)
+		return netvsc_send_pkt(device, packet, net_device, pb, skb);
 
 	/* batch packets in send buffer if possible */
 	msdp = &nvchan->msd;
@@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev,
 		}
 	}
 
-send_now:
 	if (cur_send)
 		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
 
@@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
 	    work_done < budget &&
 	    napi_complete_done(napi, work_done) &&
-	    hv_end_read(&channel->inbound)) {
+	    hv_end_read(&channel->inbound) &&
+	    napi_schedule_prep(napi)) {
 		hv_begin_read(&channel->inbound);
-		napi_reschedule(napi);
+		__napi_schedule(napi);
 	}
 
 	/* Driver may overshoot since multiple packets per descriptor */
@@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context)
 		/* disable interupts from host */
 		hv_begin_read(rbi);
 
-		__napi_schedule(&nvchan->napi);
+		__napi_schedule_irqoff(&nvchan->napi);
 	}
 }
 
@@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 			 netvsc_channel_cb, net_device->chan_table);
 
 	if (ret != 0) {
-		netif_napi_del(&net_device->chan_table[0].napi);
 		netdev_err(ndev, "unable to open channel: %d\n", ret);
 		goto cleanup;
 	}
@@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 
 	napi_enable(&net_device->chan_table[0].napi);
 
-	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
-	 * populated.
-	 */
-	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
-
 	/* Connect with the NetVsp */
 	ret = netvsc_connect_vsp(device, net_device, device_info);
 	if (ret != 0) {
@@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 		goto close;
 	}
 
+	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
+	 * populated.
+	 */
+	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
+
 	return net_device;
 
 close:
@@ -1329,6 +1319,7 @@ close:
 	vmbus_close(device->channel);
 
 cleanup:
+	netif_napi_del(&net_device->chan_table[0].napi);
 	free_netvsc_device(&net_device->rcu);
 
 	return ERR_PTR(ret);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c5584c2d440e..cdb78eefab67 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -66,10 +66,36 @@ static int debug = -1;
 module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
-static void netvsc_set_multicast_list(struct net_device *net)
+static void netvsc_change_rx_flags(struct net_device *net, int change)
 {
-	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+	int inc;
+
+	if (!vf_netdev)
+		return;
+
+	if (change & IFF_PROMISC) {
+		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
+		dev_set_promiscuity(vf_netdev, inc);
+	}
+
+	if (change & IFF_ALLMULTI) {
+		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
+		dev_set_allmulti(vf_netdev, inc);
+	}
+}
+
+static void netvsc_set_rx_mode(struct net_device *net)
+{
+	struct net_device_context *ndev_ctx = netdev_priv(net);
+	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
+	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
+
+	if (vf_netdev) {
+		dev_uc_sync(vf_netdev, net);
+		dev_mc_sync(vf_netdev, net);
+	}
 
 	rndis_filter_update(nvdev);
 }
@@ -91,12 +117,11 @@ static int netvsc_open(struct net_device *net)
 		return ret;
 	}
 
-	netif_tx_wake_all_queues(net);
-
 	rdev = nvdev->extension;
-
-	if (!rdev->link_state)
+	if (!rdev->link_state) {
 		netif_carrier_on(net);
+		netif_tx_wake_all_queues(net);
+	}
 
 	if (vf_netdev) {
 		/* Setting synthetic device up transparently sets
@@ -299,8 +324,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 	rcu_read_lock();
 	vf_netdev = rcu_dereference(ndc->vf_netdev);
 	if (vf_netdev) {
-		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
-		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
+
+		if (vf_ops->ndo_select_queue)
+			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
+						       accel_priv, fallback);
+		else
+			txq = fallback(vf_netdev, skb);
+
+		/* Record the queue selected by VF so that it can be
+		 * used for common case where VF has more queues than
+		 * the synthetic device.
+		 */
+		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
 	} else {
 		txq = netvsc_pick_tx(ndev, skb);
 	}
@@ -1576,7 +1612,8 @@ static const struct net_device_ops device_ops = {
 	.ndo_open =			netvsc_open,
 	.ndo_stop =			netvsc_close,
 	.ndo_start_xmit =		netvsc_start_xmit,
-	.ndo_set_rx_mode =		netvsc_set_multicast_list,
+	.ndo_change_rx_flags =		netvsc_change_rx_flags,
+	.ndo_set_rx_mode =		netvsc_set_rx_mode,
 	.ndo_change_mtu =		netvsc_change_mtu,
 	.ndo_validate_addr =		eth_validate_addr,
 	.ndo_set_mac_address =		netvsc_set_mac_addr,
@@ -1807,6 +1844,11 @@ static void __netvsc_vf_setup(struct net_device *ndev,
 		netdev_warn(vf_netdev,
 			    "unable to change mtu to %u\n", ndev->mtu);
 
+	/* set multicast etc flags on VF */
+	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
+	dev_uc_sync(vf_netdev, ndev);
+	dev_mc_sync(vf_netdev, ndev);
+
 	if (netif_running(ndev)) {
 		ret = dev_open(vf_netdev);
 		if (ret)
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c3ca191fea7f..8927c483c217 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -854,15 +854,19 @@ static void rndis_set_multicast(struct work_struct *w)
 {
 	struct rndis_device *rdev
 		= container_of(w, struct rndis_device, mcast_work);
+	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
+	unsigned int flags = rdev->ndev->flags;
 
-	if (rdev->ndev->flags & IFF_PROMISC)
-		rndis_filter_set_packet_filter(rdev,
-					       NDIS_PACKET_TYPE_PROMISCUOUS);
-	else
-		rndis_filter_set_packet_filter(rdev,
-					       NDIS_PACKET_TYPE_BROADCAST |
-					       NDIS_PACKET_TYPE_ALL_MULTICAST |
-					       NDIS_PACKET_TYPE_DIRECTED);
+	if (flags & IFF_PROMISC) {
+		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
+	} else {
+		if (flags & IFF_ALLMULTI)
+			flags |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+		if (flags & IFF_BROADCAST)
+			flags |= NDIS_PACKET_TYPE_BROADCAST;
+	}
+
+	rndis_filter_set_packet_filter(rdev, filter);
 }
 
 void rndis_filter_update(struct netvsc_device *nvdev)
@@ -1340,6 +1344,9 @@ void rndis_filter_device_remove(struct hv_device *dev,
 {
 	struct rndis_device *rndis_dev = net_dev->extension;
 
+	/* Don't try and setup sub channels if about to halt */
+	cancel_work_sync(&net_dev->subchan_work);
+
 	/* Halt and release the rndis device */
 	rndis_filter_halt_device(rndis_dev);
 
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e3e29c2b028b..a6f924fee584 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -819,7 +819,7 @@ void phy_start(struct phy_device *phydev)
 		break;
 	case PHY_HALTED:
 		/* if phy was suspended, bring the physical link up again */
-		phy_resume(phydev);
+		__phy_resume(phydev);
 
 		/* make sure interrupts are re-enabled for the PHY */
 		if (phy_interrupt_is_valid(phydev)) {
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index d39ae77707ef..478405e544cc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev)
 	if (!mdio_bus_phy_may_suspend(phydev))
 		goto no_resume;
 
-	mutex_lock(&phydev->lock);
 	ret = phy_resume(phydev);
-	mutex_unlock(&phydev->lock);
 	if (ret < 0)
 		return ret;
 
@@ -1041,9 +1039,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	if (err)
 		goto error;
 
-	mutex_lock(&phydev->lock);
 	phy_resume(phydev);
-	mutex_unlock(&phydev->lock);
 	phy_led_triggers_register(phydev);
 
 	return err;
@@ -1172,7 +1168,7 @@ int phy_suspend(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(phy_suspend);
 
-int phy_resume(struct phy_device *phydev)
+int __phy_resume(struct phy_device *phydev)
 {
 	struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
 	int ret = 0;
@@ -1189,6 +1185,18 @@ int phy_resume(struct phy_device *phydev)
 
 	return ret;
 }
+EXPORT_SYMBOL(__phy_resume);
+
+int phy_resume(struct phy_device *phydev)
+{
+	int ret;
+
+	mutex_lock(&phydev->lock);
+	ret = __phy_resume(phydev);
+	mutex_unlock(&phydev->lock);
+
+	return ret;
+}
 EXPORT_SYMBOL(phy_resume);
 
 int phy_loopback(struct phy_device *phydev, bool enable)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 255a5def56e9..fa2a9bdd1866 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3161,6 +3161,15 @@ ppp_connect_channel(struct channel *pch, int unit)
 		goto outl;
 
 	ppp_lock(ppp);
+	spin_lock_bh(&pch->downl);
+	if (!pch->chan) {
+		/* Don't connect unregistered channels */
+		spin_unlock_bh(&pch->downl);
+		ppp_unlock(ppp);
+		ret = -ENOTCONN;
+		goto outl;
+	}
+	spin_unlock_bh(&pch->downl);
 	if (pch->file.hdrlen > ppp->file.hdrlen)
 		ppp->file.hdrlen = pch->file.hdrlen;
 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b52258c327d2..7433bb2e4451 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,7 +181,6 @@ struct tun_file {
 	struct tun_struct *detached;
 	struct ptr_ring tx_ring;
 	struct xdp_rxq_info xdp_rxq;
-	int xdp_pending_pkts;
 };
 
 struct tun_flow_entry {
@@ -1643,6 +1642,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	else
 		*skb_xdp = 0;
 
+	preempt_disable();
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(tun->xdp_prog);
 	if (xdp_prog && !*skb_xdp) {
@@ -1662,11 +1662,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		case XDP_REDIRECT:
 			get_page(alloc_frag->page);
 			alloc_frag->offset += buflen;
-			++tfile->xdp_pending_pkts;
 			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
+			xdp_do_flush_map();
 			if (err)
 				goto err_redirect;
 			rcu_read_unlock();
+			preempt_enable();
 			return NULL;
 		case XDP_TX:
 			xdp_xmit = true;
@@ -1688,6 +1689,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	skb = build_skb(buf, buflen);
 	if (!skb) {
 		rcu_read_unlock();
+		preempt_enable();
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -1700,10 +1702,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		skb->dev = tun->dev;
 		generic_xdp_tx(skb, xdp_prog);
 		rcu_read_unlock();
+		preempt_enable();
 		return NULL;
 	}
 
 	rcu_read_unlock();
+	preempt_enable();
 
 	return skb;
 
@@ -1711,6 +1715,7 @@ err_redirect:
 	put_page(alloc_frag->page);
 err_xdp:
 	rcu_read_unlock();
+	preempt_enable();
 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
 	return NULL;
 }
@@ -1984,11 +1989,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	result = tun_get_user(tun, tfile, NULL, from,
 			      file->f_flags & O_NONBLOCK, false);
 
-	if (tfile->xdp_pending_pkts) {
-		tfile->xdp_pending_pkts = 0;
-		xdp_do_flush_map();
-	}
-
 	tun_put(tun);
 	return result;
 }
@@ -2325,13 +2325,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
 			   m->msg_flags & MSG_DONTWAIT,
 			   m->msg_flags & MSG_MORE);
-
-	if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
-	    !(m->msg_flags & MSG_MORE)) {
-		tfile->xdp_pending_pkts = 0;
-		xdp_do_flush_map();
-	}
-
 	tun_put(tun);
 	return ret;
 }
@@ -3163,7 +3156,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 
 	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-	tfile->xdp_pending_pkts = 0;
 
 	return 0;
 }
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 05dca3e5c93d..fff4b13eece2 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -896,6 +896,12 @@ static const struct usb_device_id products[] = {
 				      USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
 }, {
+	/* Cinterion PLS8 modem by GEMALTO */
+	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET,
+				      USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
+}, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 958b2e8b90f6..86f7196f9d91 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 
 		tx_data += len;
 		agg->skb_len += len;
-		agg->skb_num++;
+		agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
 
 		dev_kfree_skb_any(skb);
 
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9bb9e562b893..23374603e4d9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -504,6 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	page_off += *len;
 
 	while (--*num_buf) {
+		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 		unsigned int buflen;
 		void *buf;
 		int off;
@@ -518,7 +519,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 		/* guard against a misconfigured or uncooperative backend that
 		 * is sending packet larger than the MTU.
 		 */
-		if ((page_off + buflen) > PAGE_SIZE) {
+		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
 			put_page(p);
 			goto err_buf;
 		}
@@ -690,6 +691,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	unsigned int truesize;
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
 	bool sent;
+	int err;
 
 	head_skb = NULL;
 
@@ -701,7 +703,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		void *data;
 		u32 act;
 
-		/* This happens when rx buffer size is underestimated */
+		/* This happens when rx buffer size is underestimated
+		 * or headroom is not enough because of the buffer
+		 * was refilled before XDP is set. This should only
+		 * happen for the first several packets, so we don't
+		 * care much about its performance.
+		 */
 		if (unlikely(num_buf > 1 ||
 			     headroom < virtnet_get_headroom(vi))) {
 			/* linearize data for XDP */
@@ -736,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
-		if (act != XDP_PASS)
-			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
-
 		switch (act) {
 		case XDP_PASS:
 			/* recalculate offset to account for any header
@@ -770,6 +774,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 				goto err_xdp;
 			rcu_read_unlock();
 			goto xdp_xmit;
+		case XDP_REDIRECT:
+			err = xdp_do_redirect(dev, &xdp, xdp_prog);
+			if (err) {
+				if (unlikely(xdp_page != page))
+					put_page(xdp_page);
+				goto err_xdp;
+			}
+			*xdp_xmit = true;
+			if (unlikely(xdp_page != page))
+				goto err_xdp;
+			rcu_read_unlock();
+			goto xdp_xmit;
 		default:
 			bpf_warn_invalid_xdp_action(act);
 		case XDP_ABORTED:
@@ -1013,13 +1029,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
 }
 
 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
-					  struct ewma_pkt_len *avg_pkt_len)
+					  struct ewma_pkt_len *avg_pkt_len,
+					  unsigned int room)
 {
 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	unsigned int len;
 
-	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
+	if (room)
+		return PAGE_SIZE - room;
+
+	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
 				rq->min_buf_len, PAGE_SIZE - hdr_len);
+
 	return ALIGN(len, L1_CACHE_BYTES);
 }
 
@@ -1028,21 +1049,27 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 {
 	struct page_frag *alloc_frag = &rq->alloc_frag;
 	unsigned int headroom = virtnet_get_headroom(vi);
+	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
 	char *buf;
 	void *ctx;
 	int err;
 	unsigned int len, hole;
 
-	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len);
-	if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp)))
+	/* Extra tailroom is needed to satisfy XDP's assumption. This
+	 * means rx frags coalescing won't work, but consider we've
+	 * disabled GSO for XDP, it won't be a big issue.
+	 */
+	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
+	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
 		return -ENOMEM;
 
 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
 	buf += headroom; /* advance address leaving hole at front of pkt */
 	get_page(alloc_frag->page);
-	alloc_frag->offset += len + headroom;
+	alloc_frag->offset += len + room;
 	hole = alloc_frag->size - alloc_frag->offset;
-	if (hole < len + headroom) {
+	if (hole < len + room) {
 		/* To avoid internal fragmentation, if there is very likely not
 		 * enough space for another buffer, add the remaining space to
 		 * the current buffer.
@@ -2185,8 +2212,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	}
 
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
-	for (i = 0; i < vi->max_queue_pairs; i++)
-		napi_disable(&vi->rq[i].napi);
+	if (netif_running(dev))
+		for (i = 0; i < vi->max_queue_pairs; i++)
+			napi_disable(&vi->rq[i].napi);
 
 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
@@ -2205,7 +2233,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		}
 		if (old_prog)
 			bpf_prog_put(old_prog);
-		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+		if (netif_running(dev))
+			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
 	}
 
 	return 0;
@@ -2576,12 +2605,15 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
 {
 	struct virtnet_info *vi = netdev_priv(queue->dev);
 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
+	unsigned int headroom = virtnet_get_headroom(vi);
+	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	struct ewma_pkt_len *avg;
 
 	BUG_ON(queue_index >= vi->max_queue_pairs);
 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
 	return sprintf(buf, "%u\n",
-		       get_mergeable_buf_len(&vi->rq[queue_index], avg));
+		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
+				       SKB_DATA_ALIGN(headroom + tailroom)));
 }
 
 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index afeca6bcdade..ab8b3cbbb205 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t)
 			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
 				     0, NULL);
 			proto->restart_counter--;
-		} else
+		} else if (netif_carrier_ok(proto->dev))
+			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+				     0, NULL);
+		else
 			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
 				     0, NULL);
 		break;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8328d395e332..3127bc8633ca 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2005,7 +2005,10 @@ static void netback_changed(struct xenbus_device *dev,
 	case XenbusStateInitialised:
 	case XenbusStateReconfiguring:
 	case XenbusStateReconfigured:
+		break;
+
 	case XenbusStateUnknown:
+		wake_up_all(&module_unload_q);
 		break;
 
 	case XenbusStateInitWait:
@@ -2136,7 +2139,9 @@ static int xennet_remove(struct xenbus_device *dev)
 		xenbus_switch_state(dev, XenbusStateClosing);
 		wait_event(module_unload_q,
 			   xenbus_read_driver_state(dev->otherend) ==
-			   XenbusStateClosing);
+			   XenbusStateClosing ||
+			   xenbus_read_driver_state(dev->otherend) ==
+			   XenbusStateUnknown);
 
 		xenbus_switch_state(dev, XenbusStateClosed);
 		wait_event(module_unload_q,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 10041ac4032c..06f8dcc52ca6 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -335,8 +335,7 @@ static int pmem_attach_disk(struct device *dev,
 		dev_warn(dev, "unable to guarantee persistence of writes\n");
 		fua = 0;
 	}
-	wbc = nvdimm_has_cache(nd_region) &&
-		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
+	wbc = nvdimm_has_cache(nd_region);
 
 	if (!devm_request_mem_region(dev, res->start, resource_size(res),
 				dev_name(&ndns->dev))) {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0fe7ea35c221..7aeca5db7916 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2844,7 +2844,7 @@ out:
 }
 
 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
-		struct nvme_id_ns *id, bool *new)
+		struct nvme_id_ns *id)
 {
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	bool is_shared = id->nmic & (1 << 0);
@@ -2860,8 +2860,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 			ret = PTR_ERR(head);
 			goto out_unlock;
 		}
-
-		*new = true;
 	} else {
 		struct nvme_ns_ids ids;
 
@@ -2873,8 +2871,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 			ret = -EINVAL;
 			goto out_unlock;
 		}
-
-		*new = false;
 	}
 
 	list_add_tail(&ns->siblings, &head->list);
@@ -2945,7 +2941,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	struct nvme_id_ns *id;
 	char disk_name[DISK_NAME_LEN];
 	int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;
-	bool new = true;
 
 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
 	if (!ns)
@@ -2971,7 +2966,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	if (id->ncap == 0)
 		goto out_free_id;
 
-	if (nvme_init_ns_head(ns, nsid, id, &new))
+	if (nvme_init_ns_head(ns, nsid, id))
 		goto out_free_id;
 	nvme_setup_streams_ns(ctrl, ns);
 
@@ -3037,9 +3032,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
 			ns->disk->disk_name);
 
-	if (new)
-		nvme_mpath_add_disk(ns->head);
-	nvme_mpath_add_disk_links(ns);
+	nvme_mpath_add_disk(ns->head);
 	return;
  out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
@@ -3059,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 		return;
 
 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-		nvme_mpath_remove_disk_links(ns);
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 				   &nvme_ns_id_attr_group);
 		if (ns->ndev)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5dd4ceefed8f..8f0f34d06d46 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
  */
 int nvmf_register_transport(struct nvmf_transport_ops *ops)
 {
-	if (!ops->create_ctrl || !ops->module)
+	if (!ops->create_ctrl)
 		return -EINVAL;
 
 	down_write(&nvmf_transports_rwsem);
@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -EINVAL;
 				goto out;
 			}
+			if (opts->discovery_nqn) {
+				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+				break;
+			}
+
 			opts->nr_io_queues = min_t(unsigned int,
 						   num_online_cpus(), token);
 			break;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7f51f8414b97..1dc1387b7134 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
 			sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
 
 	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
-	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
 	/* Linux supports only Dynamic controllers */
 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
 	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 			sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
 	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
 	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
-	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
 
 	lsop->queue = queue;
 	lsreq->rqstaddr = conn_rqst;
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 		goto out_free_tag_set;
 	}
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_cleanup_blk_queue;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
 
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
 
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	nvme_fc_init_queue(ctrl, 0);
 
 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
-				NVME_AQ_BLK_MQ_DEPTH);
+				NVME_AQ_DEPTH);
 	if (ret)
 		goto out_free_queue;
 
 	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
-				NVME_AQ_BLK_MQ_DEPTH,
-				(NVME_AQ_BLK_MQ_DEPTH / 4));
+				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
 	if (ret)
 		goto out_delete_hw_queue;
 
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
 
 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 	if (ret)
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 		opts->queue_size = ctrl->ctrl.maxcmd;
 	}
 
+	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+		/* warn if sqsize is lower than queue_size */
+		dev_warn(ctrl->ctrl.device,
+			 "queue_size %zu > ctrl sqsize %u, clamping down\n",
+			 opts->queue_size, ctrl->ctrl.sqsize + 1);
+		opts->queue_size = ctrl->ctrl.sqsize + 1;
+	}
+
 	ret = nvme_fc_init_aen_ops(ctrl);
 	if (ret)
 		goto out_term_aen_ops;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 3b211d9e58b8..060f69e03427 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -198,30 +198,16 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
 		return;
-	device_add_disk(&head->subsys->dev, head->disk);
-	if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
-			&nvme_ns_id_attr_group))
-		pr_warn("%s: failed to create sysfs group for identification\n",
-			head->disk->disk_name);
-}
-
-void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-	struct kobject *slave_disk_kobj, *holder_disk_kobj;
-
-	if (!ns->head->disk)
-		return;
-
-	slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
-	if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
-			      kobject_name(slave_disk_kobj)))
-		return;
 
-	holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
-	if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
-			      kobject_name(holder_disk_kobj)))
-		sysfs_remove_link(ns->head->disk->slave_dir,
-				  kobject_name(slave_disk_kobj));
+	mutex_lock(&head->subsys->lock);
+	if (!(head->disk->flags & GENHD_FL_UP)) {
+		device_add_disk(&head->subsys->dev, head->disk);
+		if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
+				&nvme_ns_id_attr_group))
+			pr_warn("%s: failed to create sysfs group for identification\n",
+				head->disk->disk_name);
+	}
+	mutex_unlock(&head->subsys->lock);
 }
 
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
@@ -238,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 	blk_cleanup_queue(head->disk->queue);
 	put_disk(head->disk);
 }
-
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-	if (!ns->head->disk)
-		return;
-
-	sysfs_remove_link(ns->disk->part0.holder_dir,
-			  kobject_name(&disk_to_dev(ns->head->disk)->kobj));
-	sysfs_remove_link(ns->head->disk->slave_dir,
-			  kobject_name(&disk_to_dev(ns->disk)->kobj));
-}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0521e4707d1c..d733b14ede9d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -410,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
 void nvme_mpath_add_disk(struct nvme_ns_head *head);
-void nvme_mpath_add_disk_links(struct nvme_ns *ns);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns);
 
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
@@ -454,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 }
-static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-}
-static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-}
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 }
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 73036d2fbbd5..b6f43b738f03 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
 	if (!(csts & NVME_CSTS_CFS) && !nssro)
 		return false;
 
-	/* If PCI error recovery process is happening, we cannot reset or
-	 * the recovery mechanism will surely fail.
-	 */
-	if (pci_channel_offline(to_pci_dev(dev->dev)))
-		return false;
-
 	return true;
 }
 
@@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_command cmd;
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
 
+	/* If PCI error recovery process is happening, we cannot reset or
+	 * the recovery mechanism will surely fail.
+	 */
+	mb();
+	if (pci_channel_offline(to_pci_dev(dev->dev)))
+		return BLK_EH_RESET_TIMER;
+
 	/*
 	 * Reset immediately if the controller is failed
 	 */
@@ -1459,7 +1460,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	nvmeq->cq_vector = qid - 1;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
-		return result;
+		goto release_vector;
 
 	result = adapter_alloc_sq(dev, qid, nvmeq);
 	if (result < 0)
@@ -1473,9 +1474,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	return result;
 
  release_sq:
+	dev->online_queues--;
 	adapter_delete_sq(dev, qid);
  release_cq:
 	adapter_delete_cq(dev, qid);
+ release_vector:
+	nvmeq->cq_vector = -1;
 	return result;
 }
 
@@ -1910,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	int result, nr_io_queues;
 	unsigned long size;
 
-	nr_io_queues = num_present_cpus();
+	nr_io_queues = num_possible_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3a51ed50eff2..4d84a73ee12d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1051,7 +1051,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	struct nvme_rdma_device *dev = queue->device;
 	struct ib_device *ibdev = dev->dev;
 
-	if (!blk_rq_bytes(rq))
+	if (!blk_rq_payload_bytes(rq))
 		return;
 
 	if (req->mr) {
@@ -1166,7 +1166,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
-	if (!blk_rq_bytes(rq))
+	if (!blk_rq_payload_bytes(rq))
 		return nvme_rdma_set_sg_null(c);
 
 	req->sg_table.sgl = req->first_sgl;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 0bd737117a80..a78029e4e5f4 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -520,9 +520,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		goto fail;
 	}
 
-	/* either variant of SGLs is fine, as we don't support metadata */
-	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
-		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
+	/*
+	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
+	 * contains an address of a single contiguous physical buffer that is
+	 * byte aligned.
+	 */
+	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 7991ec3a17db..861d1509b22b 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return BLK_STS_OK;
 	}
 
-	if (blk_rq_bytes(req)) {
+	if (blk_rq_payload_bytes(req)) {
 		iod->sg_table.sgl = iod->first_sgl;
 		if (sg_alloc_table_chained(&iod->sg_table,
 				blk_rq_nr_phys_segments(req),
@@ -193,7 +193,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 		iod->req.sg = iod->sg_table.sgl;
 		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
-		iod->req.transfer_len = blk_rq_bytes(req);
+		iod->req.transfer_len = blk_rq_payload_bytes(req);
 	}
 
 	blk_mq_start_request(req);
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 8de2d5c69b1d..dc9303abda42 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -613,7 +613,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 	/* setup bus numbers */
 	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
 	val &= 0xff000000;
-	val |= 0x00010100;
+	val |= 0x00ff0100;
 	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
 
 	/* setup command register */
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 369d48d6c6f1..365447240d95 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -401,6 +401,10 @@ void pci_release_resource(struct pci_dev *dev, int resno)
 	struct resource *res = dev->resource + resno;
 
 	pci_info(dev, "BAR %d: releasing %pR\n", resno, res);
+
+	if (!res->parent)
+		return;
+
 	release_resource(res);
 	res->end = resource_size(res) - 1;
 	res->start = 0;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 0c2ed11c0603..f63db346c219 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -638,7 +638,7 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
638 if (irq_is_percpu_devid(irq)) 638 if (irq_is_percpu_devid(irq))
639 disable_percpu_irq(irq); 639 disable_percpu_irq(irq);
640 else 640 else
641 disable_irq(irq); 641 disable_irq_nosync(irq);
642 } 642 }
643 643
644 per_cpu(cpu_armpmu, cpu) = NULL; 644 per_cpu(cpu_armpmu, cpu) = NULL;
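
Aside: this teardown runs in the CPU-hotplug path. disable_irq() synchronously waits for running handlers and can deadlock when the interrupt is affine to the CPU being torn down; disable_irq_nosync() only masks the line. The split, sketched against the <linux/interrupt.h> and <linux/irqdesc.h> interfaces:

#include <linux/interrupt.h>
#include <linux/irqdesc.h>

static void pmu_mask_irq(unsigned int irq)
{
	if (irq_is_percpu_devid(irq))
		disable_percpu_irq(irq);	/* per-CPU: local disable */
	else
		disable_irq_nosync(irq);	/* don't wait for handlers */
}
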
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index c5ff4525edef..c5493ea51282 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -675,3 +675,8 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
675 return 0; 675 return 0;
676} 676}
677EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); 677EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
678
679MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>");
680MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
681MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY");
682MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
index 1fda9d6c7ea3..4b91ff74779b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
@@ -716,7 +716,7 @@ static const char * const uart_b_groups[] = {
716 "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x", 716 "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x",
717}; 717};
718 718
719static const char * const uart_ao_b_gpioz_groups[] = { 719static const char * const uart_ao_b_z_groups[] = {
720 "uart_ao_tx_b_z", "uart_ao_rx_b_z", 720 "uart_ao_tx_b_z", "uart_ao_rx_b_z",
721 "uart_ao_cts_b_z", "uart_ao_rts_b_z", 721 "uart_ao_cts_b_z", "uart_ao_rts_b_z",
722}; 722};
@@ -855,7 +855,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = {
855 FUNCTION(nand), 855 FUNCTION(nand),
856 FUNCTION(uart_a), 856 FUNCTION(uart_a),
857 FUNCTION(uart_b), 857 FUNCTION(uart_b),
858 FUNCTION(uart_ao_b_gpioz), 858 FUNCTION(uart_ao_b_z),
859 FUNCTION(i2c0), 859 FUNCTION(i2c0),
860 FUNCTION(i2c1), 860 FUNCTION(i2c1),
861 FUNCTION(i2c2), 861 FUNCTION(i2c2),
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 6dec6ab13300..d8599736a41a 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -423,7 +423,7 @@ static int chromeos_laptop_probe(struct platform_device *pdev)
423 return ret; 423 return ret;
424} 424}
425 425
426static const struct chromeos_laptop samsung_series_5_550 = { 426static struct chromeos_laptop samsung_series_5_550 = {
427 .i2c_peripherals = { 427 .i2c_peripherals = {
428 /* Touchpad. */ 428 /* Touchpad. */
429 { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, 429 { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
@@ -432,14 +432,14 @@ static const struct chromeos_laptop samsung_series_5_550 = {
432 }, 432 },
433}; 433};
434 434
435static const struct chromeos_laptop samsung_series_5 = { 435static struct chromeos_laptop samsung_series_5 = {
436 .i2c_peripherals = { 436 .i2c_peripherals = {
437 /* Light Sensor. */ 437 /* Light Sensor. */
438 { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS }, 438 { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
439 }, 439 },
440}; 440};
441 441
442static const struct chromeos_laptop chromebook_pixel = { 442static struct chromeos_laptop chromebook_pixel = {
443 .i2c_peripherals = { 443 .i2c_peripherals = {
444 /* Touch Screen. */ 444 /* Touch Screen. */
445 { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL }, 445 { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
@@ -450,14 +450,14 @@ static const struct chromeos_laptop chromebook_pixel = {
450 }, 450 },
451}; 451};
452 452
453static const struct chromeos_laptop hp_chromebook_14 = { 453static struct chromeos_laptop hp_chromebook_14 = {
454 .i2c_peripherals = { 454 .i2c_peripherals = {
455 /* Touchpad. */ 455 /* Touchpad. */
456 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, 456 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
457 }, 457 },
458}; 458};
459 459
460static const struct chromeos_laptop dell_chromebook_11 = { 460static struct chromeos_laptop dell_chromebook_11 = {
461 .i2c_peripherals = { 461 .i2c_peripherals = {
462 /* Touchpad. */ 462 /* Touchpad. */
463 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, 463 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
@@ -466,28 +466,28 @@ static const struct chromeos_laptop dell_chromebook_11 = {
466 }, 466 },
467}; 467};
468 468
469static const struct chromeos_laptop toshiba_cb35 = { 469static struct chromeos_laptop toshiba_cb35 = {
470 .i2c_peripherals = { 470 .i2c_peripherals = {
471 /* Touchpad. */ 471 /* Touchpad. */
472 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, 472 { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
473 }, 473 },
474}; 474};
475 475
476static const struct chromeos_laptop acer_c7_chromebook = { 476static struct chromeos_laptop acer_c7_chromebook = {
477 .i2c_peripherals = { 477 .i2c_peripherals = {
478 /* Touchpad. */ 478 /* Touchpad. */
479 { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, 479 { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
480 }, 480 },
481}; 481};
482 482
483static const struct chromeos_laptop acer_ac700 = { 483static struct chromeos_laptop acer_ac700 = {
484 .i2c_peripherals = { 484 .i2c_peripherals = {
485 /* Light Sensor. */ 485 /* Light Sensor. */
486 { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, 486 { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
487 }, 487 },
488}; 488};
489 489
490static const struct chromeos_laptop acer_c720 = { 490static struct chromeos_laptop acer_c720 = {
491 .i2c_peripherals = { 491 .i2c_peripherals = {
492 /* Touchscreen. */ 492 /* Touchscreen. */
493 { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, 493 { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
@@ -500,14 +500,14 @@ static const struct chromeos_laptop acer_c720 = {
500 }, 500 },
501}; 501};
502 502
503static const struct chromeos_laptop hp_pavilion_14_chromebook = { 503static struct chromeos_laptop hp_pavilion_14_chromebook = {
504 .i2c_peripherals = { 504 .i2c_peripherals = {
505 /* Touchpad. */ 505 /* Touchpad. */
506 { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, 506 { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
507 }, 507 },
508}; 508};
509 509
510static const struct chromeos_laptop cr48 = { 510static struct chromeos_laptop cr48 = {
511 .i2c_peripherals = { 511 .i2c_peripherals = {
512 /* Light Sensor. */ 512 /* Light Sensor. */
513 { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, 513 { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 9a8f96465cdc..51ebc5a6053f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -105,31 +105,45 @@ config ASUS_LAPTOP
105 105
106 If you have an ACPI-compatible ASUS laptop, say Y or M here. 106 If you have an ACPI-compatible ASUS laptop, say Y or M here.
107 107
108#
109# The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those
110# backends are selected. The "depends" line prevents a configuration
111# where DELL_SMBIOS=y while either of those dependencies =m.
112#
108config DELL_SMBIOS 113config DELL_SMBIOS
109 tristate 114 tristate "Dell SMBIOS driver"
115 depends on DCDBAS || DCDBAS=n
116 depends on ACPI_WMI || ACPI_WMI=n
117 ---help---
118 This provides support for the Dell SMBIOS calling interface.
119 If you have a Dell computer you should enable this option.
120
121 Be sure to select at least one backend for it to work properly.
110 122
111config DELL_SMBIOS_WMI 123config DELL_SMBIOS_WMI
112 tristate "Dell SMBIOS calling interface (WMI implementation)" 124 bool "Dell SMBIOS driver WMI backend"
125 default y
113 depends on ACPI_WMI 126 depends on ACPI_WMI
114 select DELL_WMI_DESCRIPTOR 127 select DELL_WMI_DESCRIPTOR
115 select DELL_SMBIOS 128 depends on DELL_SMBIOS
116 ---help--- 129 ---help---
117 This provides an implementation for the Dell SMBIOS calling interface 130 This provides an implementation for the Dell SMBIOS calling interface
118 communicated over ACPI-WMI. 131 communicated over ACPI-WMI.
119 132
120 If you have a Dell computer from >2007 you should say Y or M here. 133 If you have a Dell computer from >2007 you should say Y here.
121 If you aren't sure and this module doesn't work for your computer 134 If you aren't sure and this module doesn't work for your computer
122 it just won't load. 135 it just won't load.
123 136
124config DELL_SMBIOS_SMM 137config DELL_SMBIOS_SMM
125 tristate "Dell SMBIOS calling interface (SMM implementation)" 138 bool "Dell SMBIOS driver SMM backend"
139 default y
126 depends on DCDBAS 140 depends on DCDBAS
127 select DELL_SMBIOS 141 depends on DELL_SMBIOS
128 ---help--- 142 ---help---
129 This provides an implementation for the Dell SMBIOS calling interface 143 This provides an implementation for the Dell SMBIOS calling interface
130 communicated over SMI/SMM. 144 communicated over SMI/SMM.
131 145
132 If you have a Dell computer from <=2017 you should say Y or M here. 146 If you have a Dell computer from <=2017 you should say Y here.
133 If you aren't sure and this module doesn't work for your computer 147 If you aren't sure and this module doesn't work for your computer
134 it just won't load. 148 it just won't load.
135 149
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index c388608ad2a3..2ba6cb795338 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -13,8 +13,9 @@ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
13obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o 13obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
14obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o 14obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
15obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o 15obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
16obj-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o 16dell-smbios-objs := dell-smbios-base.o
17obj-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o 17dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
18dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
18obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o 19obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
19obj-$(CONFIG_DELL_WMI) += dell-wmi.o 20obj-$(CONFIG_DELL_WMI) += dell-wmi.o
20obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o 21obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios-base.c
index 8541cde4cb7d..2485c80a9fdd 100644
--- a/drivers/platform/x86/dell-smbios.c
+++ b/drivers/platform/x86/dell-smbios-base.c
@@ -36,7 +36,7 @@ static DEFINE_MUTEX(smbios_mutex);
36struct smbios_device { 36struct smbios_device {
37 struct list_head list; 37 struct list_head list;
38 struct device *device; 38 struct device *device;
39 int (*call_fn)(struct calling_interface_buffer *); 39 int (*call_fn)(struct calling_interface_buffer *arg);
40}; 40};
41 41
42struct smbios_call { 42struct smbios_call {
@@ -352,8 +352,10 @@ static void __init parse_da_table(const struct dmi_header *dm)
352 struct calling_interface_structure *table = 352 struct calling_interface_structure *table =
353 container_of(dm, struct calling_interface_structure, header); 353 container_of(dm, struct calling_interface_structure, header);
354 354
355 /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least 355 /*
356 6 bytes of entry */ 356 * 4 bytes of table header, plus 7 bytes of Dell header
357 * plus at least 6 bytes of entry
358 */
357 359
358 if (dm->length < 17) 360 if (dm->length < 17)
359 return; 361 return;
@@ -554,7 +556,7 @@ static void free_group(struct platform_device *pdev)
554static int __init dell_smbios_init(void) 556static int __init dell_smbios_init(void)
555{ 557{
556 const struct dmi_device *valid; 558 const struct dmi_device *valid;
557 int ret; 559 int ret, wmi, smm;
558 560
559 valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL); 561 valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL);
560 if (!valid) { 562 if (!valid) {
@@ -589,8 +591,24 @@ static int __init dell_smbios_init(void)
589 if (ret) 591 if (ret)
590 goto fail_create_group; 592 goto fail_create_group;
591 593
594 /* register backends */
595 wmi = init_dell_smbios_wmi();
596 if (wmi)
597 pr_debug("Failed to initialize WMI backend: %d\n", wmi);
598 smm = init_dell_smbios_smm();
599 if (smm)
600 pr_debug("Failed to initialize SMM backend: %d\n", smm);
601 if (wmi && smm) {
602 pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n",
603 wmi, smm);
604 goto fail_sysfs;
605 }
606
592 return 0; 607 return 0;
593 608
609fail_sysfs:
610 free_group(platform_device);
611
594fail_create_group: 612fail_create_group:
595 platform_device_del(platform_device); 613 platform_device_del(platform_device);
596 614
@@ -607,6 +625,8 @@ fail_platform_driver:
607 625
608static void __exit dell_smbios_exit(void) 626static void __exit dell_smbios_exit(void)
609{ 627{
628 exit_dell_smbios_wmi();
629 exit_dell_smbios_smm();
610 mutex_lock(&smbios_mutex); 630 mutex_lock(&smbios_mutex);
611 if (platform_device) { 631 if (platform_device) {
612 free_group(platform_device); 632 free_group(platform_device);
@@ -617,11 +637,12 @@ static void __exit dell_smbios_exit(void)
617 mutex_unlock(&smbios_mutex); 637 mutex_unlock(&smbios_mutex);
618} 638}
619 639
620subsys_initcall(dell_smbios_init); 640module_init(dell_smbios_init);
621module_exit(dell_smbios_exit); 641module_exit(dell_smbios_exit);
622 642
623MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); 643MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
624MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); 644MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
625MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); 645MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
646MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
626MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS"); 647MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
627MODULE_LICENSE("GPL"); 648MODULE_LICENSE("GPL");
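
Aside: with the WMI and SMM code folded into the single dell-smbios module (see the Makefile hunk above), base init now owns backend bring-up. The added logic reduces to the sketch below, which leans only on the stub pattern introduced in dell-smbios.h further down — a compiled-out backend's init_* returns -ENODEV — so the module loads as long as one real backend comes up:

static int register_backends(void)
{
	int wmi = init_dell_smbios_wmi();	/* -ENODEV if compiled out */
	int smm = init_dell_smbios_smm();

	return (wmi && smm) ? -ENODEV : 0;	/* one backend suffices */
}

This is also why the backend Kconfig symbols became bools under DELL_SMBIOS rather than standalone tristates.
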
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
index 89f65c4651a0..e9e9da556318 100644
--- a/drivers/platform/x86/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell-smbios-smm.c
@@ -58,7 +58,7 @@ static const struct dmi_system_id dell_device_table[] __initconst = {
58}; 58};
59MODULE_DEVICE_TABLE(dmi, dell_device_table); 59MODULE_DEVICE_TABLE(dmi, dell_device_table);
60 60
61static void __init parse_da_table(const struct dmi_header *dm) 61static void parse_da_table(const struct dmi_header *dm)
62{ 62{
63 struct calling_interface_structure *table = 63 struct calling_interface_structure *table =
64 container_of(dm, struct calling_interface_structure, header); 64 container_of(dm, struct calling_interface_structure, header);
@@ -73,7 +73,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
73 da_command_code = table->cmdIOCode; 73 da_command_code = table->cmdIOCode;
74} 74}
75 75
76static void __init find_cmd_address(const struct dmi_header *dm, void *dummy) 76static void find_cmd_address(const struct dmi_header *dm, void *dummy)
77{ 77{
78 switch (dm->type) { 78 switch (dm->type) {
79 case 0xda: /* Calling interface */ 79 case 0xda: /* Calling interface */
@@ -128,7 +128,7 @@ static bool test_wsmt_enabled(void)
128 return false; 128 return false;
129} 129}
130 130
131static int __init dell_smbios_smm_init(void) 131int init_dell_smbios_smm(void)
132{ 132{
133 int ret; 133 int ret;
134 /* 134 /*
@@ -176,7 +176,7 @@ fail_platform_device_alloc:
176 return ret; 176 return ret;
177} 177}
178 178
179static void __exit dell_smbios_smm_exit(void) 179void exit_dell_smbios_smm(void)
180{ 180{
181 if (platform_device) { 181 if (platform_device) {
182 dell_smbios_unregister_device(&platform_device->dev); 182 dell_smbios_unregister_device(&platform_device->dev);
@@ -184,13 +184,3 @@ static void __exit dell_smbios_smm_exit(void)
184 free_page((unsigned long)buffer); 184 free_page((unsigned long)buffer);
185 } 185 }
186} 186}
187
188subsys_initcall(dell_smbios_smm_init);
189module_exit(dell_smbios_smm_exit);
190
191MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
192MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
193MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
194MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
195MODULE_DESCRIPTION("Dell SMBIOS communications over SMI");
196MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 609557aa5868..fbefedb1c172 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -228,7 +228,7 @@ static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
228 { }, 228 { },
229}; 229};
230 230
231static void __init parse_b1_table(const struct dmi_header *dm) 231static void parse_b1_table(const struct dmi_header *dm)
232{ 232{
233 struct misc_bios_flags_structure *flags = 233 struct misc_bios_flags_structure *flags =
234 container_of(dm, struct misc_bios_flags_structure, header); 234 container_of(dm, struct misc_bios_flags_structure, header);
@@ -242,7 +242,7 @@ static void __init parse_b1_table(const struct dmi_header *dm)
242 wmi_supported = 1; 242 wmi_supported = 1;
243} 243}
244 244
245static void __init find_b1(const struct dmi_header *dm, void *dummy) 245static void find_b1(const struct dmi_header *dm, void *dummy)
246{ 246{
247 switch (dm->type) { 247 switch (dm->type) {
248 case 0xb1: /* misc bios flags */ 248 case 0xb1: /* misc bios flags */
@@ -261,7 +261,7 @@ static struct wmi_driver dell_smbios_wmi_driver = {
261 .filter_callback = dell_smbios_wmi_filter, 261 .filter_callback = dell_smbios_wmi_filter,
262}; 262};
263 263
264static int __init init_dell_smbios_wmi(void) 264int init_dell_smbios_wmi(void)
265{ 265{
266 dmi_walk(find_b1, NULL); 266 dmi_walk(find_b1, NULL);
267 267
@@ -271,15 +271,9 @@ static int __init init_dell_smbios_wmi(void)
271 return wmi_driver_register(&dell_smbios_wmi_driver); 271 return wmi_driver_register(&dell_smbios_wmi_driver);
272} 272}
273 273
274static void __exit exit_dell_smbios_wmi(void) 274void exit_dell_smbios_wmi(void)
275{ 275{
276 wmi_driver_unregister(&dell_smbios_wmi_driver); 276 wmi_driver_unregister(&dell_smbios_wmi_driver);
277} 277}
278 278
279module_init(init_dell_smbios_wmi);
280module_exit(exit_dell_smbios_wmi);
281
282MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); 279MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID);
283MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
284MODULE_DESCRIPTION("Dell SMBIOS communications over WMI");
285MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h
index 138d478d9adc..d8adaf959740 100644
--- a/drivers/platform/x86/dell-smbios.h
+++ b/drivers/platform/x86/dell-smbios.h
@@ -75,4 +75,29 @@ int dell_laptop_register_notifier(struct notifier_block *nb);
75int dell_laptop_unregister_notifier(struct notifier_block *nb); 75int dell_laptop_unregister_notifier(struct notifier_block *nb);
76void dell_laptop_call_notifier(unsigned long action, void *data); 76void dell_laptop_call_notifier(unsigned long action, void *data);
77 77
78#endif 78/* for the supported backends */
79#ifdef CONFIG_DELL_SMBIOS_WMI
80int init_dell_smbios_wmi(void);
81void exit_dell_smbios_wmi(void);
82#else /* CONFIG_DELL_SMBIOS_WMI */
83static inline int init_dell_smbios_wmi(void)
84{
85 return -ENODEV;
86}
87static inline void exit_dell_smbios_wmi(void)
88{}
89#endif /* CONFIG_DELL_SMBIOS_WMI */
90
91#ifdef CONFIG_DELL_SMBIOS_SMM
92int init_dell_smbios_smm(void);
93void exit_dell_smbios_smm(void);
94#else /* CONFIG_DELL_SMBIOS_SMM */
95static inline int init_dell_smbios_smm(void)
96{
97 return -ENODEV;
98}
99static inline void exit_dell_smbios_smm(void)
100{}
101#endif /* CONFIG_DELL_SMBIOS_SMM */
102
103#endif /* _DELL_SMBIOS_H_ */
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 2c9927430d85..8d102195a392 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -714,7 +714,7 @@ static int __init dell_wmi_init(void)
714 714
715 return wmi_driver_register(&dell_wmi_driver); 715 return wmi_driver_register(&dell_wmi_driver);
716} 716}
717module_init(dell_wmi_init); 717late_initcall(dell_wmi_init);
718 718
719static void __exit dell_wmi_exit(void) 719static void __exit dell_wmi_exit(void)
720{ 720{
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index d1a01311c1a2..5e3df194723e 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -376,6 +376,7 @@ static int intel_hid_remove(struct platform_device *device)
376{ 376{
377 acpi_handle handle = ACPI_HANDLE(&device->dev); 377 acpi_handle handle = ACPI_HANDLE(&device->dev);
378 378
379 device_init_wakeup(&device->dev, false);
379 acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); 380 acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
380 intel_hid_set_enable(&device->dev, false); 381 intel_hid_set_enable(&device->dev, false);
381 intel_button_array_enable(&device->dev, false); 382 intel_button_array_enable(&device->dev, false);
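
Aside: this intel-hid hunk and the intel-vbtn one below add the same teardown. Probe enables the device as a wakeup source, so remove must disable it again or the wakeup source leaks across unbind/rebind. The balanced pattern, with made-up driver names:

#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int demo_probe(struct platform_device *pdev)
{
	device_init_wakeup(&pdev->dev, true);	/* register wakeup source */
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	device_init_wakeup(&pdev->dev, false);	/* undo it symmetrically */
	return 0;
}
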
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index b703d6f5b099..c13780b8dabb 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/acpi.h> 9#include <linux/acpi.h>
10#include <linux/dmi.h>
10#include <linux/input.h> 11#include <linux/input.h>
11#include <linux/input/sparse-keymap.h> 12#include <linux/input/sparse-keymap.h>
12#include <linux/kernel.h> 13#include <linux/kernel.h>
@@ -97,9 +98,35 @@ out_unknown:
97 dev_dbg(&device->dev, "unknown event index 0x%x\n", event); 98 dev_dbg(&device->dev, "unknown event index 0x%x\n", event);
98} 99}
99 100
100static int intel_vbtn_probe(struct platform_device *device) 101static void detect_tablet_mode(struct platform_device *device)
101{ 102{
103 const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
104 struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
105 acpi_handle handle = ACPI_HANDLE(&device->dev);
102 struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; 106 struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL };
107 union acpi_object *obj;
108 acpi_status status;
109 int m;
110
111 if (!(chassis_type && strcmp(chassis_type, "31") == 0))
112 goto out;
113
114 status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
115 if (ACPI_FAILURE(status))
116 goto out;
117
118 obj = vgbs_output.pointer;
119 if (!(obj && obj->type == ACPI_TYPE_INTEGER))
120 goto out;
121
122 m = !(obj->integer.value & TABLET_MODE_FLAG);
123 input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
124out:
125 kfree(vgbs_output.pointer);
126}
127
128static int intel_vbtn_probe(struct platform_device *device)
129{
103 acpi_handle handle = ACPI_HANDLE(&device->dev); 130 acpi_handle handle = ACPI_HANDLE(&device->dev);
104 struct intel_vbtn_priv *priv; 131 struct intel_vbtn_priv *priv;
105 acpi_status status; 132 acpi_status status;
@@ -122,22 +149,7 @@ static int intel_vbtn_probe(struct platform_device *device)
122 return err; 149 return err;
123 } 150 }
124 151
125 /* 152 detect_tablet_mode(device);
126 * VGBS being present and returning something means we have
127 * a tablet mode switch.
128 */
129 status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
130 if (ACPI_SUCCESS(status)) {
131 union acpi_object *obj = vgbs_output.pointer;
132
133 if (obj && obj->type == ACPI_TYPE_INTEGER) {
134 int m = !(obj->integer.value & TABLET_MODE_FLAG);
135
136 input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
137 }
138 }
139
140 kfree(vgbs_output.pointer);
141 153
142 status = acpi_install_notify_handler(handle, 154 status = acpi_install_notify_handler(handle,
143 ACPI_DEVICE_NOTIFY, 155 ACPI_DEVICE_NOTIFY,
@@ -154,6 +166,7 @@ static int intel_vbtn_remove(struct platform_device *device)
154{ 166{
155 acpi_handle handle = ACPI_HANDLE(&device->dev); 167 acpi_handle handle = ACPI_HANDLE(&device->dev);
156 168
169 device_init_wakeup(&device->dev, false);
157 acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); 170 acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
158 171
159 /* 172 /*
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index c0c8945603cb..8796211ef24a 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -945,7 +945,7 @@ static int wmi_dev_probe(struct device *dev)
945 wblock->char_dev.mode = 0444; 945 wblock->char_dev.mode = 0444;
946 ret = misc_register(&wblock->char_dev); 946 ret = misc_register(&wblock->char_dev);
947 if (ret) { 947 if (ret) {
948 dev_warn(dev, "failed to register char dev: %d", ret); 948 dev_warn(dev, "failed to register char dev: %d\n", ret);
949 ret = -ENOMEM; 949 ret = -ENOMEM;
950 goto probe_misc_failure; 950 goto probe_misc_failure;
951 } 951 }
@@ -1048,7 +1048,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
1048 1048
1049 if (result) { 1049 if (result) {
1050 dev_warn(wmi_bus_dev, 1050 dev_warn(wmi_bus_dev,
1051 "%s data block query control method not found", 1051 "%s data block query control method not found\n",
1052 method); 1052 method);
1053 return result; 1053 return result;
1054 } 1054 }
@@ -1198,7 +1198,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
1198 1198
1199 retval = device_add(&wblock->dev.dev); 1199 retval = device_add(&wblock->dev.dev);
1200 if (retval) { 1200 if (retval) {
1201 dev_err(wmi_bus_dev, "failed to register %pULL\n", 1201 dev_err(wmi_bus_dev, "failed to register %pUL\n",
1202 wblock->gblock.guid); 1202 wblock->gblock.guid);
1203 if (debug_event) 1203 if (debug_event)
1204 wmi_method_enable(wblock, 0); 1204 wmi_method_enable(wblock, 0);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index dd4708c58480..1fc0c0811da4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4310,7 +4310,7 @@ static int _regulator_resume_early(struct device *dev, void *data)
4310 4310
4311 rstate = regulator_get_suspend_state(rdev, *state); 4311 rstate = regulator_get_suspend_state(rdev, *state);
4312 if (rstate == NULL) 4312 if (rstate == NULL)
4313 return -EINVAL; 4313 return 0;
4314 4314
4315 mutex_lock(&rdev->mutex); 4315 mutex_lock(&rdev->mutex);
4316 4316
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index 72c8b3e1022b..e0a9c445ed67 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
51 * arbitrary timeout. 51 * arbitrary timeout.
52 */ 52 */
53 ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val, 53 ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val,
54 !(val & STM32_VRR), 650, 10000); 54 val & STM32_VRR, 650, 10000);
55 if (ret) { 55 if (ret) {
56 dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n"); 56 dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n");
57 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); 57 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
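
Aside: the vrefbuf fix is a pure polarity inversion: VRR is the "voltage ready" flag, so the poll must succeed once the bit becomes set; the old condition !(val & STM32_VRR) returned success while VRR was still clear, i.e. before the buffer was actually up. The corrected poll in isolation, assuming only that readl_poll_timeout() comes from <linux/iopoll.h> (same 650 us poll interval and 10 ms timeout as the fix):

#include <linux/iopoll.h>

static int wait_vrefbuf_ready(void __iomem *csr, u32 ready_mask)
{
	u32 val;

	/* exit condition: the ready bit has become set */
	return readl_poll_timeout(csr, val, val & ready_mask, 650, 10000);
}
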
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a7c15f0085e2..ecef8e73d40b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2581,8 +2581,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
2581 case DASD_CQR_QUEUED: 2581 case DASD_CQR_QUEUED:
2582 /* request was not started - just set to cleared */ 2582 /* request was not started - just set to cleared */
2583 cqr->status = DASD_CQR_CLEARED; 2583 cqr->status = DASD_CQR_CLEARED;
2584 if (cqr->callback_data == DASD_SLEEPON_START_TAG)
2585 cqr->callback_data = DASD_SLEEPON_END_TAG;
2586 break; 2584 break;
2587 case DASD_CQR_IN_IO: 2585 case DASD_CQR_IN_IO:
2588 /* request in IO - terminate IO and release again */ 2586 /* request in IO - terminate IO and release again */
@@ -3902,9 +3900,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
3902 wait_event(dasd_flush_wq, 3900 wait_event(dasd_flush_wq,
3903 (cqr->status != DASD_CQR_CLEAR_PENDING)); 3901 (cqr->status != DASD_CQR_CLEAR_PENDING));
3904 3902
3905 /* mark sleepon requests as ended */ 3903 /*
3906 if (cqr->callback_data == DASD_SLEEPON_START_TAG) 3904 * requeue requests to blocklayer will only work
3907 cqr->callback_data = DASD_SLEEPON_END_TAG; 3905 * for block device requests
3906 */
3907 if (_dasd_requeue_request(cqr))
3908 continue;
3908 3909
3909 /* remove requests from device and block queue */ 3910 /* remove requests from device and block queue */
3910 list_del_init(&cqr->devlist); 3911 list_del_init(&cqr->devlist);
@@ -3917,13 +3918,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
3917 cqr = refers; 3918 cqr = refers;
3918 } 3919 }
3919 3920
3920 /*
3921 * requeue requests to blocklayer will only work
3922 * for block device requests
3923 */
3924 if (_dasd_requeue_request(cqr))
3925 continue;
3926
3927 if (cqr->block) 3921 if (cqr->block)
3928 list_del_init(&cqr->blocklist); 3922 list_del_init(&cqr->blocklist);
3929 cqr->block->base->discipline->free_cp( 3923 cqr->block->base->discipline->free_cp(
@@ -3940,8 +3934,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
3940 list_splice_tail(&requeue_queue, &device->ccw_queue); 3934 list_splice_tail(&requeue_queue, &device->ccw_queue);
3941 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 3935 spin_unlock_irq(get_ccwdev_lock(device->cdev));
3942 } 3936 }
3943 /* wake up generic waitqueue for eventually ended sleepon requests */ 3937 dasd_schedule_device_bh(device);
3944 wake_up(&generic_waitq);
3945 return rc; 3938 return rc;
3946} 3939}
3947 3940
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1319122e9d12..9169af7dbb43 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -795,6 +795,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
795 795
796 ccw_device_set_timeout(cdev, 0); 796 ccw_device_set_timeout(cdev, 0);
797 cdev->private->iretry = 255; 797 cdev->private->iretry = 255;
798 cdev->private->async_kill_io_rc = -ETIMEDOUT;
798 ret = ccw_device_cancel_halt_clear(cdev); 799 ret = ccw_device_cancel_halt_clear(cdev);
799 if (ret == -EBUSY) { 800 if (ret == -EBUSY) {
800 ccw_device_set_timeout(cdev, 3*HZ); 801 ccw_device_set_timeout(cdev, 3*HZ);
@@ -871,7 +872,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
871 /* OK, i/o is dead now. Call interrupt handler. */ 872 /* OK, i/o is dead now. Call interrupt handler. */
872 if (cdev->handler) 873 if (cdev->handler)
873 cdev->handler(cdev, cdev->private->intparm, 874 cdev->handler(cdev, cdev->private->intparm,
874 ERR_PTR(-EIO)); 875 ERR_PTR(cdev->private->async_kill_io_rc));
875} 876}
876 877
877static void 878static void
@@ -888,14 +889,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
888 ccw_device_online_verify(cdev, 0); 889 ccw_device_online_verify(cdev, 0);
889 if (cdev->handler) 890 if (cdev->handler)
890 cdev->handler(cdev, cdev->private->intparm, 891 cdev->handler(cdev, cdev->private->intparm,
891 ERR_PTR(-EIO)); 892 ERR_PTR(cdev->private->async_kill_io_rc));
892} 893}
893 894
894void ccw_device_kill_io(struct ccw_device *cdev) 895void ccw_device_kill_io(struct ccw_device *cdev)
895{ 896{
896 int ret; 897 int ret;
897 898
899 ccw_device_set_timeout(cdev, 0);
898 cdev->private->iretry = 255; 900 cdev->private->iretry = 255;
901 cdev->private->async_kill_io_rc = -EIO;
899 ret = ccw_device_cancel_halt_clear(cdev); 902 ret = ccw_device_cancel_halt_clear(cdev);
900 if (ret == -EBUSY) { 903 if (ret == -EBUSY) {
901 ccw_device_set_timeout(cdev, 3*HZ); 904 ccw_device_set_timeout(cdev, 3*HZ);
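
Aside: the new async_kill_io_rc field lets the two completion paths above report why the I/O was killed instead of a blanket -EIO: each initiator of the cancel/halt/clear sequence records its reason first (-ETIMEDOUT from the online-timeout path, -EIO from ccw_device_kill_io()), and the killing_irq/killing_timeout handlers forward ERR_PTR of that value. The shape of it, with invented names:

#include <linux/err.h>

struct demo_private {
	int async_kill_io_rc;
};

/* initiator: stash the reason, then start cancel/halt/clear */
static void demo_start_kill(struct demo_private *priv, int reason)
{
	priv->async_kill_io_rc = reason;	/* -EIO or -ETIMEDOUT */
}

/* completion: hand the stored reason to the driver's handler */
static void demo_finish_kill(struct demo_private *priv,
			     void (*handler)(void *irb))
{
	if (handler)
		handler(ERR_PTR(priv->async_kill_io_rc));
}
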
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 1caf6a398760..75ce12a24dc2 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -159,7 +159,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
159} 159}
160 160
161/** 161/**
162 * ccw_device_start_key() - start a s390 channel program with key 162 * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
163 * @cdev: target ccw device 163 * @cdev: target ccw device
164 * @cpa: logical start address of channel program 164 * @cpa: logical start address of channel program
165 * @intparm: user specific interruption parameter; will be presented back to 165 * @intparm: user specific interruption parameter; will be presented back to
@@ -170,10 +170,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
170 * @key: storage key to be used for the I/O 170 * @key: storage key to be used for the I/O
171 * @flags: additional flags; defines the action to be performed for I/O 171 * @flags: additional flags; defines the action to be performed for I/O
172 * processing. 172 * processing.
173 * @expires: timeout value in jiffies
173 * 174 *
174 * Start a S/390 channel program. When the interrupt arrives, the 175 * Start a S/390 channel program. When the interrupt arrives, the
175 * IRQ handler is called, either immediately, delayed (dev-end missing, 176 * IRQ handler is called, either immediately, delayed (dev-end missing,
176 * or sense required) or never (no IRQ handler registered). 177 * or sense required) or never (no IRQ handler registered).
178 * This function notifies the device driver if the channel program has not
179 * completed during the time specified by @expires. If a timeout occurs, the
180 * channel program is terminated via xsch, hsch or csch, and the device's
181 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
177 * Returns: 182 * Returns:
178 * %0, if the operation was successful; 183 * %0, if the operation was successful;
179 * -%EBUSY, if the device is busy, or status pending; 184 * -%EBUSY, if the device is busy, or status pending;
@@ -182,9 +187,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
182 * Context: 187 * Context:
183 * Interrupts disabled, ccw device lock held 188 * Interrupts disabled, ccw device lock held
184 */ 189 */
185int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, 190int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
186 unsigned long intparm, __u8 lpm, __u8 key, 191 unsigned long intparm, __u8 lpm, __u8 key,
187 unsigned long flags) 192 unsigned long flags, int expires)
188{ 193{
189 struct subchannel *sch; 194 struct subchannel *sch;
190 int ret; 195 int ret;
@@ -224,6 +229,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
224 switch (ret) { 229 switch (ret) {
225 case 0: 230 case 0:
226 cdev->private->intparm = intparm; 231 cdev->private->intparm = intparm;
232 if (expires)
233 ccw_device_set_timeout(cdev, expires);
227 break; 234 break;
228 case -EACCES: 235 case -EACCES:
229 case -ENODEV: 236 case -ENODEV:
@@ -234,7 +241,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
234} 241}
235 242
236/** 243/**
237 * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key 244 * ccw_device_start_key() - start a s390 channel program with key
238 * @cdev: target ccw device 245 * @cdev: target ccw device
239 * @cpa: logical start address of channel program 246 * @cpa: logical start address of channel program
240 * @intparm: user specific interruption parameter; will be presented back to 247 * @intparm: user specific interruption parameter; will be presented back to
@@ -245,15 +252,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
245 * @key: storage key to be used for the I/O 252 * @key: storage key to be used for the I/O
246 * @flags: additional flags; defines the action to be performed for I/O 253 * @flags: additional flags; defines the action to be performed for I/O
247 * processing. 254 * processing.
248 * @expires: timeout value in jiffies
249 * 255 *
250 * Start a S/390 channel program. When the interrupt arrives, the 256 * Start a S/390 channel program. When the interrupt arrives, the
251 * IRQ handler is called, either immediately, delayed (dev-end missing, 257 * IRQ handler is called, either immediately, delayed (dev-end missing,
252 * or sense required) or never (no IRQ handler registered). 258 * or sense required) or never (no IRQ handler registered).
253 * This function notifies the device driver if the channel program has not
254 * completed during the time specified by @expires. If a timeout occurs, the
255 * channel program is terminated via xsch, hsch or csch, and the device's
256 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
257 * Returns: 259 * Returns:
258 * %0, if the operation was successful; 260 * %0, if the operation was successful;
259 * -%EBUSY, if the device is busy, or status pending; 261 * -%EBUSY, if the device is busy, or status pending;
@@ -262,19 +264,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
262 * Context: 264 * Context:
263 * Interrupts disabled, ccw device lock held 265 * Interrupts disabled, ccw device lock held
264 */ 266 */
265int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, 267int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
266 unsigned long intparm, __u8 lpm, __u8 key, 268 unsigned long intparm, __u8 lpm, __u8 key,
267 unsigned long flags, int expires) 269 unsigned long flags)
268{ 270{
269 int ret; 271 return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
270 272 flags, 0);
271 if (!cdev)
272 return -ENODEV;
273 ccw_device_set_timeout(cdev, expires);
274 ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
275 if (ret != 0)
276 ccw_device_set_timeout(cdev, 0);
277 return ret;
278} 273}
279 274
280/** 275/**
@@ -489,18 +484,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
489EXPORT_SYMBOL(ccw_device_get_id); 484EXPORT_SYMBOL(ccw_device_get_id);
490 485
491/** 486/**
492 * ccw_device_tm_start_key() - perform start function 487 * ccw_device_tm_start_timeout_key() - perform start function
493 * @cdev: ccw device on which to perform the start function 488 * @cdev: ccw device on which to perform the start function
494 * @tcw: transport-command word to be started 489 * @tcw: transport-command word to be started
495 * @intparm: user defined parameter to be passed to the interrupt handler 490 * @intparm: user defined parameter to be passed to the interrupt handler
496 * @lpm: mask of paths to use 491 * @lpm: mask of paths to use
497 * @key: storage key to use for storage access 492 * @key: storage key to use for storage access
493 * @expires: time span in jiffies after which to abort request
498 * 494 *
499 * Start the tcw on the given ccw device. Return zero on success, non-zero 495 * Start the tcw on the given ccw device. Return zero on success, non-zero
500 * otherwise. 496 * otherwise.
501 */ 497 */
502int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, 498int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
503 unsigned long intparm, u8 lpm, u8 key) 499 unsigned long intparm, u8 lpm, u8 key,
500 int expires)
504{ 501{
505 struct subchannel *sch; 502 struct subchannel *sch;
506 int rc; 503 int rc;
@@ -527,37 +524,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
527 return -EACCES; 524 return -EACCES;
528 } 525 }
529 rc = cio_tm_start_key(sch, tcw, lpm, key); 526 rc = cio_tm_start_key(sch, tcw, lpm, key);
530 if (rc == 0) 527 if (rc == 0) {
531 cdev->private->intparm = intparm; 528 cdev->private->intparm = intparm;
529 if (expires)
530 ccw_device_set_timeout(cdev, expires);
531 }
532 return rc; 532 return rc;
533} 533}
534EXPORT_SYMBOL(ccw_device_tm_start_key); 534EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
535 535
536/** 536/**
537 * ccw_device_tm_start_timeout_key() - perform start function 537 * ccw_device_tm_start_key() - perform start function
538 * @cdev: ccw device on which to perform the start function 538 * @cdev: ccw device on which to perform the start function
539 * @tcw: transport-command word to be started 539 * @tcw: transport-command word to be started
540 * @intparm: user defined parameter to be passed to the interrupt handler 540 * @intparm: user defined parameter to be passed to the interrupt handler
541 * @lpm: mask of paths to use 541 * @lpm: mask of paths to use
542 * @key: storage key to use for storage access 542 * @key: storage key to use for storage access
543 * @expires: time span in jiffies after which to abort request
544 * 543 *
545 * Start the tcw on the given ccw device. Return zero on success, non-zero 544 * Start the tcw on the given ccw device. Return zero on success, non-zero
546 * otherwise. 545 * otherwise.
547 */ 546 */
548int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, 547int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
549 unsigned long intparm, u8 lpm, u8 key, 548 unsigned long intparm, u8 lpm, u8 key)
550 int expires)
551{ 549{
552 int ret; 550 return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
553
554 ccw_device_set_timeout(cdev, expires);
555 ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
556 if (ret != 0)
557 ccw_device_set_timeout(cdev, 0);
558 return ret;
559} 551}
560EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); 552EXPORT_SYMBOL(ccw_device_tm_start_key);
561 553
562/** 554/**
563 * ccw_device_tm_start() - perform start function 555 * ccw_device_tm_start() - perform start function
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index af571d8d6925..90e4e3a7841b 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -157,6 +157,7 @@ struct ccw_device_private {
157 unsigned long intparm; /* user interruption parameter */ 157 unsigned long intparm; /* user interruption parameter */
158 struct qdio_irq *qdio_data; 158 struct qdio_irq *qdio_data;
159 struct irb irb; /* device status */ 159 struct irb irb; /* device status */
160 int async_kill_io_rc;
160 struct senseid senseid; /* SenseID info */ 161 struct senseid senseid; /* SenseID info */
161 struct pgid pgid[8]; /* path group IDs per chpid*/ 162 struct pgid pgid[8]; /* path group IDs per chpid*/
162 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ 163 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index ca72f3311004..c8b308cfabf1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2134,24 +2134,25 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2134 } 2134 }
2135 reply->callback = reply_cb; 2135 reply->callback = reply_cb;
2136 reply->param = reply_param; 2136 reply->param = reply_param;
2137 if (card->state == CARD_STATE_DOWN) 2137
2138 reply->seqno = QETH_IDX_COMMAND_SEQNO;
2139 else
2140 reply->seqno = card->seqno.ipa++;
2141 init_waitqueue_head(&reply->wait_q); 2138 init_waitqueue_head(&reply->wait_q);
2142 spin_lock_irqsave(&card->lock, flags);
2143 list_add_tail(&reply->list, &card->cmd_waiter_list);
2144 spin_unlock_irqrestore(&card->lock, flags);
2145 2139
2146 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; 2140 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
2147 qeth_prepare_control_data(card, len, iob);
2148 2141
2149 if (IS_IPA(iob->data)) { 2142 if (IS_IPA(iob->data)) {
2150 cmd = __ipa_cmd(iob); 2143 cmd = __ipa_cmd(iob);
2144 cmd->hdr.seqno = card->seqno.ipa++;
2145 reply->seqno = cmd->hdr.seqno;
2151 event_timeout = QETH_IPA_TIMEOUT; 2146 event_timeout = QETH_IPA_TIMEOUT;
2152 } else { 2147 } else {
2148 reply->seqno = QETH_IDX_COMMAND_SEQNO;
2153 event_timeout = QETH_TIMEOUT; 2149 event_timeout = QETH_TIMEOUT;
2154 } 2150 }
2151 qeth_prepare_control_data(card, len, iob);
2152
2153 spin_lock_irqsave(&card->lock, flags);
2154 list_add_tail(&reply->list, &card->cmd_waiter_list);
2155 spin_unlock_irqrestore(&card->lock, flags);
2155 2156
2156 timeout = jiffies + event_timeout; 2157 timeout = jiffies + event_timeout;
2157 2158
@@ -2933,7 +2934,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
2933 memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); 2934 memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
2934 cmd->hdr.command = command; 2935 cmd->hdr.command = command;
2935 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; 2936 cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
2936 cmd->hdr.seqno = card->seqno.ipa; 2937 /* cmd->hdr.seqno is set by qeth_send_control_data() */
2937 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); 2938 cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
2938 cmd->hdr.rel_adapter_no = (__u8) card->info.portno; 2939 cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
2939 if (card->options.layer2) 2940 if (card->options.layer2)
@@ -3898,10 +3899,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
3898int qeth_get_elements_no(struct qeth_card *card, 3899int qeth_get_elements_no(struct qeth_card *card,
3899 struct sk_buff *skb, int extra_elems, int data_offset) 3900 struct sk_buff *skb, int extra_elems, int data_offset)
3900{ 3901{
3901 int elements = qeth_get_elements_for_range( 3902 addr_t end = (addr_t)skb->data + skb_headlen(skb);
3902 (addr_t)skb->data + data_offset, 3903 int elements = qeth_get_elements_for_frags(skb);
3903 (addr_t)skb->data + skb_headlen(skb)) + 3904 addr_t start = (addr_t)skb->data + data_offset;
3904 qeth_get_elements_for_frags(skb); 3905
3906 if (start != end)
3907 elements += qeth_get_elements_for_range(start, end);
3905 3908
3906 if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { 3909 if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
3907 QETH_DBF_MESSAGE(2, "Invalid size of IP packet " 3910 QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
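
Aside: qeth_get_elements_for_range() counts the pages a non-empty [start, end) range touches, so a zero-length linear range (start == end, e.g. when the linear part is consumed entirely by headers handled elsewhere) was still charged one buffer element. The added start != end guard fixes the overestimate; the same guard reappears in the TSO path in qeth_l3_main.c below. Sketched with a stand-in counter:

typedef unsigned long addr_t;

/*
 * stand-in for qeth_get_elements_for_range(): pages spanned by a
 * non-empty [start, end), assuming 4K pages
 */
static int elements_for_range(addr_t start, addr_t end)
{
	return ((end - 1) >> 12) - (start >> 12) + 1;
}

static int linear_elements(addr_t start, addr_t end)
{
	if (start == end)
		return 0;	/* empty range: no element needed */
	return elements_for_range(start, end);
}
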
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index bdd45f4dcace..498fe9af2cdb 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -40,8 +40,40 @@ struct qeth_ipaddr {
40 unsigned int pfxlen; 40 unsigned int pfxlen;
41 } a6; 41 } a6;
42 } u; 42 } u;
43
44}; 43};
44
45static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
46 struct qeth_ipaddr *a2)
47{
48 if (a1->proto != a2->proto)
49 return false;
50 if (a1->proto == QETH_PROT_IPV6)
51 return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
52 return a1->u.a4.addr == a2->u.a4.addr;
53}
54
55static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
56 struct qeth_ipaddr *a2)
57{
58 /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
59 * so 'proto' and 'addr' match for sure.
60 *
61 * For ucast:
62 * - 'mac' is always 0.
63 * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
64 * values are required to avoid mixups in takeover eligibility.
65 *
66 * For mcast,
67 * - 'mac' is mapped from the IP, and thus always matches.
68 * - 'mask'/'pfxlen' is always 0.
69 */
70 if (a1->type != a2->type)
71 return false;
72 if (a1->proto == QETH_PROT_IPV6)
73 return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
74 return a1->u.a4.mask == a2->u.a4.mask;
75}
76
45static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) 77static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
46{ 78{
47 u64 ret = 0; 79 u64 ret = 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b0c888e86cd4..962a04b68dd2 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -67,6 +67,24 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
67 qeth_l3_ipaddr6_to_string(addr, buf); 67 qeth_l3_ipaddr6_to_string(addr, buf);
68} 68}
69 69
70static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
71 struct qeth_ipaddr *query)
72{
73 u64 key = qeth_l3_ipaddr_hash(query);
74 struct qeth_ipaddr *addr;
75
76 if (query->is_multicast) {
77 hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
78 if (qeth_l3_addr_match_ip(addr, query))
79 return addr;
80 } else {
81 hash_for_each_possible(card->ip_htable, addr, hnode, key)
82 if (qeth_l3_addr_match_ip(addr, query))
83 return addr;
84 }
85 return NULL;
86}
87
70static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) 88static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
71{ 89{
72 int i, j; 90 int i, j;
@@ -120,34 +138,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
120 return rc; 138 return rc;
121} 139}
122 140
123inline int
124qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
125{
126 return addr1->proto == addr2->proto &&
127 !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
128 ether_addr_equal_64bits(addr1->mac, addr2->mac);
129}
130
131static struct qeth_ipaddr *
132qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
133{
134 struct qeth_ipaddr *addr;
135
136 if (tmp_addr->is_multicast) {
137 hash_for_each_possible(card->ip_mc_htable, addr,
138 hnode, qeth_l3_ipaddr_hash(tmp_addr))
139 if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
140 return addr;
141 } else {
142 hash_for_each_possible(card->ip_htable, addr,
143 hnode, qeth_l3_ipaddr_hash(tmp_addr))
144 if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
145 return addr;
146 }
147
148 return NULL;
149}
150
151int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) 141int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
152{ 142{
153 int rc = 0; 143 int rc = 0;
@@ -162,23 +152,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
162 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); 152 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
163 } 153 }
164 154
165 addr = qeth_l3_ip_from_hash(card, tmp_addr); 155 addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
166 if (!addr) 156 if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
167 return -ENOENT; 157 return -ENOENT;
168 158
169 addr->ref_counter--; 159 addr->ref_counter--;
170 if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || 160 if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
171 addr->type == QETH_IP_TYPE_RXIP))
172 return rc; 161 return rc;
173 if (addr->in_progress) 162 if (addr->in_progress)
174 return -EINPROGRESS; 163 return -EINPROGRESS;
175 164
176 if (!qeth_card_hw_is_reachable(card)) { 165 if (qeth_card_hw_is_reachable(card))
177 addr->disp_flag = QETH_DISP_ADDR_DELETE; 166 rc = qeth_l3_deregister_addr_entry(card, addr);
178 return 0;
179 }
180
181 rc = qeth_l3_deregister_addr_entry(card, addr);
182 167
183 hash_del(&addr->hnode); 168 hash_del(&addr->hnode);
184 kfree(addr); 169 kfree(addr);
@@ -190,6 +175,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
190{ 175{
191 int rc = 0; 176 int rc = 0;
192 struct qeth_ipaddr *addr; 177 struct qeth_ipaddr *addr;
178 char buf[40];
193 179
194 QETH_CARD_TEXT(card, 4, "addip"); 180 QETH_CARD_TEXT(card, 4, "addip");
195 181
@@ -200,8 +186,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
200 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); 186 QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
201 } 187 }
202 188
203 addr = qeth_l3_ip_from_hash(card, tmp_addr); 189 addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
204 if (!addr) { 190 if (addr) {
191 if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
192 return -EADDRINUSE;
193 if (qeth_l3_addr_match_all(addr, tmp_addr)) {
194 addr->ref_counter++;
195 return 0;
196 }
197 qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
198 buf);
199 dev_warn(&card->gdev->dev,
200 "Registering IP address %s failed\n", buf);
201 return -EADDRINUSE;
202 } else {
205 addr = qeth_l3_get_addr_buffer(tmp_addr->proto); 203 addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
206 if (!addr) 204 if (!addr)
207 return -ENOMEM; 205 return -ENOMEM;
@@ -241,19 +239,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
241 (rc == IPA_RC_LAN_OFFLINE)) { 239 (rc == IPA_RC_LAN_OFFLINE)) {
242 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 240 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
243 if (addr->ref_counter < 1) { 241 if (addr->ref_counter < 1) {
244 qeth_l3_delete_ip(card, addr); 242 qeth_l3_deregister_addr_entry(card, addr);
243 hash_del(&addr->hnode);
245 kfree(addr); 244 kfree(addr);
246 } 245 }
247 } else { 246 } else {
248 hash_del(&addr->hnode); 247 hash_del(&addr->hnode);
249 kfree(addr); 248 kfree(addr);
250 } 249 }
251 } else {
252 if (addr->type == QETH_IP_TYPE_NORMAL ||
253 addr->type == QETH_IP_TYPE_RXIP)
254 addr->ref_counter++;
255 } 250 }
256
257 return rc; 251 return rc;
258} 252}
259 253
@@ -321,11 +315,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
 	spin_lock_bh(&card->ip_lock);
 
 	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-			qeth_l3_deregister_addr_entry(card, addr);
-			hash_del(&addr->hnode);
-			kfree(addr);
-		} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
 			if (addr->proto == QETH_PROT_IPV4) {
 				addr->in_progress = 1;
 				spin_unlock_bh(&card->ip_lock);
@@ -643,12 +633,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		rc = qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
@@ -713,12 +698,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		rc = qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
@@ -1239,8 +1219,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 		tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
 		tmp->is_multicast = 1;
 
-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 		} else {
 			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1319,8 +1300,9 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
 		       sizeof(struct in6_addr));
 		tmp->is_multicast = 1;
 
-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			continue;
 		}
@@ -2450,11 +2432,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
 			struct sk_buff *skb, int extra_elems)
 {
-	addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
-	int elements = qeth_get_elements_for_range(
-				tcpdptr,
-				(addr_t)skb->data + skb_headlen(skb)) +
-			qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
 
 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2,
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 57bf43e34863..dd9464920456 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -328,8 +328,6 @@ static void scsi_host_dev_release(struct device *dev)
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);
 
-	destroy_rcu_head(&shost->rcu);
-
 	if (shost->shost_state == SHOST_CREATED) {
 		/*
 		 * Free the shost_dev device name here if scsi_host_alloc()
@@ -404,7 +402,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	INIT_LIST_HEAD(&shost->starved_list);
 	init_waitqueue_head(&shost->host_wait);
 	mutex_init(&shost->scan_mutex);
-	init_rcu_head(&shost->rcu);
 
 	index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
 	if (index < 0)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 073ced07e662..dc8e850fbfd2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -216,36 +216,30 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
 /**
  * megasas_fire_cmd_fusion -	Sends command to the FW
  * @instance:			Adapter soft state
- * @req_desc:			32bit or 64bit Request descriptor
+ * @req_desc:			64bit Request descriptor
  *
- * Perform PCI Write. Ventura supports 32 bit Descriptor.
- * Prior to Ventura (12G) MR controller supports 64 bit Descriptor.
+ * Perform PCI Write.
  */
 
 static void
 megasas_fire_cmd_fusion(struct megasas_instance *instance,
 		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
 {
-	if (instance->adapter_type == VENTURA_SERIES)
-		writel(le32_to_cpu(req_desc->u.low),
-			&instance->reg_set->inbound_single_queue_port);
-	else {
 #if defined(writeq) && defined(CONFIG_64BIT)
-		u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
-			le32_to_cpu(req_desc->u.low));
+	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
+		le32_to_cpu(req_desc->u.low));
 
-		writeq(req_data, &instance->reg_set->inbound_low_queue_port);
+	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
 #else
-		unsigned long flags;
-		spin_lock_irqsave(&instance->hba_lock, flags);
-		writel(le32_to_cpu(req_desc->u.low),
-			&instance->reg_set->inbound_low_queue_port);
-		writel(le32_to_cpu(req_desc->u.high),
-			&instance->reg_set->inbound_high_queue_port);
-		mmiowb();
-		spin_unlock_irqrestore(&instance->hba_lock, flags);
+	unsigned long flags;
+	spin_lock_irqsave(&instance->hba_lock, flags);
+	writel(le32_to_cpu(req_desc->u.low),
+		&instance->reg_set->inbound_low_queue_port);
+	writel(le32_to_cpu(req_desc->u.high),
+		&instance->reg_set->inbound_high_queue_port);
+	mmiowb();
+	spin_unlock_irqrestore(&instance->hba_lock, flags);
 #endif
-	}
 }
 
 /**
@@ -982,7 +976,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 	const char *sys_info;
 	MFI_CAPABILITIES *drv_ops;
 	u32 scratch_pad_2;
-	unsigned long flags;
 	ktime_t time;
 	bool cur_fw_64bit_dma_capable;
 
@@ -1121,14 +1114,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 		break;
 	}
 
-	/* For Ventura also IOC INIT required 64 bit Descriptor write. */
-	spin_lock_irqsave(&instance->hba_lock, flags);
-	writel(le32_to_cpu(req_desc.u.low),
-		&instance->reg_set->inbound_low_queue_port);
-	writel(le32_to_cpu(req_desc.u.high),
-		&instance->reg_set->inbound_high_queue_port);
-	mmiowb();
-	spin_unlock_irqrestore(&instance->hba_lock, flags);
+	megasas_fire_cmd_fusion(instance, &req_desc);
 
 	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
 
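
Note: the megaraid_sas hunks above route the IOC INIT descriptor through megasas_fire_cmd_fusion(), which issues one atomic 64-bit writeq when available and otherwise falls back to two 32-bit writel stores under the HBA lock, so another CPU cannot interleave its own low/high pair between them. A minimal userspace sketch of that split-write fallback, with plain memory and a pthread mutex standing in for MMIO and the hba_lock (all names here are hypothetical, not the driver's API):

/* sketch: 64-bit doorbell write, with a locked 32-bit fallback */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint64_t doorbell;          /* stand-in for the MMIO port pair */
static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;

static void fire_cmd(uint64_t desc)
{
#if defined(__LP64__)
    doorbell = desc;                        /* one 64-bit store, no lock needed */
#else
    volatile uint32_t *half = (volatile uint32_t *)&doorbell;

    /* the two stores must not interleave with another CPU's pair */
    pthread_mutex_lock(&hba_lock);
    half[0] = (uint32_t)desc;               /* low word first, as in the driver */
    half[1] = (uint32_t)(desc >> 32);
    pthread_mutex_unlock(&hba_lock);
#endif
}

int main(void)
{
    fire_cmd(0x1122334455667788ULL);
    printf("doorbell = %llx\n", (unsigned long long)doorbell);
    return 0;
}
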
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59a87ca328d3..0aafbfd1b746 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -6297,14 +6297,14 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
 }
 
 /**
- * _wait_for_commands_to_complete - reset controller
+ * mpt3sas_wait_for_commands_to_complete - reset controller
  * @ioc: Pointer to MPT_ADAPTER structure
  *
  * This function is waiting 10s for all pending commands to complete
  * prior to putting controller in reset.
  */
-static void
-_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
 {
 	u32 ioc_state;
 
@@ -6377,7 +6377,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 		is_fault = 1;
 	}
 	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
-	_wait_for_commands_to_complete(ioc);
+	mpt3sas_wait_for_commands_to_complete(ioc);
 	_base_mask_interrupts(ioc);
 	r = _base_make_ioc_ready(ioc, type);
 	if (r)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 789bc421424b..99ccf83b8c51 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1433,6 +1433,9 @@ void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
 
 int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
 
+void
+mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
+
 
 /* scsih shared API */
 struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 74fca184dba9..a1cb0236c550 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2835,7 +2835,8 @@ scsih_abort(struct scsi_cmnd *scmd)
 	_scsih_tm_display_info(ioc, scmd);
 
 	sas_device_priv_data = scmd->device->hostdata;
-	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+	    ioc->remove_host) {
 		sdev_printk(KERN_INFO, scmd->device,
 		    "device been deleted! scmd(%p)\n", scmd);
 		scmd->result = DID_NO_CONNECT << 16;
@@ -2898,7 +2899,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
 	_scsih_tm_display_info(ioc, scmd);
 
 	sas_device_priv_data = scmd->device->hostdata;
-	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+	    ioc->remove_host) {
 		sdev_printk(KERN_INFO, scmd->device,
 		    "device been deleted! scmd(%p)\n", scmd);
 		scmd->result = DID_NO_CONNECT << 16;
@@ -2961,7 +2963,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
 	_scsih_tm_display_info(ioc, scmd);
 
 	sas_device_priv_data = scmd->device->hostdata;
-	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+	    ioc->remove_host) {
 		starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
 		    scmd);
 		scmd->result = DID_NO_CONNECT << 16;
@@ -3019,7 +3022,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
 	    ioc->name, scmd);
 	scsi_print_command(scmd);
 
-	if (ioc->is_driver_loading) {
+	if (ioc->is_driver_loading || ioc->remove_host) {
 		pr_info(MPT3SAS_FMT "Blocking the host reset\n",
 		    ioc->name);
 		r = FAILED;
@@ -4453,7 +4456,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
 		st = scsi_cmd_priv(scmd);
 		mpt3sas_base_clear_st(ioc, st);
 		scsi_dma_unmap(scmd);
-		if (ioc->pci_error_recovery)
+		if (ioc->pci_error_recovery || ioc->remove_host)
 			scmd->result = DID_NO_CONNECT << 16;
 		else
 			scmd->result = DID_RESET << 16;
@@ -9739,6 +9742,10 @@ static void scsih_remove(struct pci_dev *pdev)
 	unsigned long flags;
 
 	ioc->remove_host = 1;
+
+	mpt3sas_wait_for_commands_to_complete(ioc);
+	_scsih_flush_running_cmds(ioc);
+
 	_scsih_fw_event_cleanup_queue(ioc);
 
 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
@@ -9815,6 +9822,10 @@ scsih_shutdown(struct pci_dev *pdev)
 	unsigned long flags;
 
 	ioc->remove_host = 1;
+
+	mpt3sas_wait_for_commands_to_complete(ioc);
+	_scsih_flush_running_cmds(ioc);
+
 	_scsih_fw_event_cleanup_queue(ioc);
 
 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
@@ -10547,7 +10558,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
 	ioc->firmware_event_thread = alloc_ordered_workqueue(
-	    ioc->firmware_event_name, WQ_MEM_RECLAIM);
+	    ioc->firmware_event_name, 0);
 	if (!ioc->firmware_event_thread) {
 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
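
Note: taken together, the mpt3sas hunks order teardown as: set ioc->remove_host so the error-handler entry points refuse new work, wait for in-flight commands, then flush whatever remains as DID_NO_CONNECT. A hedged sketch of that ordering only (the adapter struct and helpers below are invented for illustration, not driver API):

/* sketch: mark-removal -> drain -> flush ordering */
#include <stdio.h>

struct adapter {
    int remove_host;            /* checked by abort/reset entry points */
    int inflight;
};

static void wait_for_commands(struct adapter *a)
{
    /* stands in for the bounded 10s wait in the driver */
    while (a->inflight > 0)
        a->inflight--;
}

static void flush_running(struct adapter *a)
{
    /* with remove_host set, leftovers complete as "no connect" */
    printf("flushed with remove_host=%d\n", a->remove_host);
}

static void remove_adapter(struct adapter *a)
{
    a->remove_host = 1;         /* must come first: blocks new resets */
    wait_for_commands(a);
    flush_running(a);
}

int main(void)
{
    struct adapter a = { 0, 3 };
    remove_adapter(&a);
    return 0;
}
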
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 667d7697ba01..d09afe1b567d 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -762,6 +762,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
 
 	iscsi_cid = cqe->conn_id;
 	qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
+	if (!qedi_conn) {
+		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+			  "icid not found 0x%x\n", cqe->conn_id);
+		return;
+	}
 
 	/* Based on this itt get the corresponding qedi_cmd */
 	spin_lock_bh(&qedi_conn->tmf_work_lock);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index be7d6824581a..c9689f97c307 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -261,9 +261,9 @@
 struct name_list_extended {
 	struct get_name_list_extended *l;
 	dma_addr_t		ldma;
-	struct list_head	fcports;	/* protect by sess_list */
+	struct list_head	fcports;
+	spinlock_t		fcports_lock;
 	u32			size;
-	u8			sent;
 };
 /*
  * Timeout timer counts in seconds
@@ -2217,6 +2217,7 @@ typedef struct {
 
 /* FCP-4 types */
 #define FC4_TYPE_FCP_SCSI	0x08
+#define FC4_TYPE_NVME		0x28
 #define FC4_TYPE_OTHER		0x0
 #define FC4_TYPE_UNKNOWN	0xff
 
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 5bf9a59432f6..403fa096f8c8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3179,6 +3179,7 @@ done_free_sp:
 	sp->free(sp);
 	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
+	fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	return rval;
 }
 
@@ -3370,6 +3371,7 @@ done_free_sp:
 	sp->free(sp);
 	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
+	fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	return rval;
 }
 
@@ -3971,6 +3973,9 @@ out:
 	spin_lock_irqsave(&vha->work_lock, flags);
 	vha->scan.scan_flags &= ~SF_SCANNING;
 	spin_unlock_irqrestore(&vha->work_lock, flags);
+
+	if ((fc4type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled)
+		qla24xx_async_gpnft(vha, FC4_TYPE_NVME);
 }
 
 static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 2dea1129d396..00329dda6179 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -213,6 +213,7 @@ done_free_sp:
 	sp->free(sp);
 	fcport->flags &= ~FCF_ASYNC_SENT;
 done:
+	fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	return rval;
 }
 
@@ -263,7 +264,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
 done_free_sp:
 	sp->free(sp);
 done:
-	fcport->flags &= ~FCF_ASYNC_SENT;
+	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 	return rval;
 }
 
@@ -271,6 +272,7 @@ void
 qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
+	fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	/* Don't re-login in target mode */
 	if (!fcport->tgt_session)
 		qla2x00_mark_device_lost(vha, fcport, 1, 0);
@@ -284,6 +286,7 @@ qla2x00_async_prlo_sp_done(void *s, int res)
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct scsi_qla_host *vha = sp->vha;
 
+	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	if (!test_bit(UNLOADING, &vha->dpc_flags))
 		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
 		    lio->u.logio.data);
@@ -322,6 +325,7 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
 done_free_sp:
 	sp->free(sp);
 done:
+	fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	return rval;
 }
 
@@ -375,6 +379,8 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
 	    "Async done-%s res %x %8phC\n",
 	    sp->name, res, sp->fcport->port_name);
 
+	sp->fcport->flags &= ~FCF_ASYNC_SENT;
+
 	memset(&ea, 0, sizeof(ea));
 	ea.event = FCME_ADISC_DONE;
 	ea.rc = res;
@@ -425,7 +431,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
 done_free_sp:
 	sp->free(sp);
 done:
-	fcport->flags &= ~FCF_ASYNC_SENT;
+	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 	qla2x00_post_async_adisc_work(vha, fcport, data);
 	return rval;
 }
@@ -643,8 +649,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
 		    (loop_id & 0x7fff));
 	}
 
-	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
-	vha->gnl.sent = 0;
+	spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
 
 	INIT_LIST_HEAD(&h);
 	fcport = tf = NULL;
@@ -653,12 +658,16 @@ qla24xx_async_gnl_sp_done(void *s, int res)
 
 	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
 		list_del_init(&fcport->gnl_entry);
+		spin_lock(&vha->hw->tgt.sess_lock);
 		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+		spin_unlock(&vha->hw->tgt.sess_lock);
 		ea.fcport = fcport;
 
 		qla2x00_fcport_event_handler(vha, &ea);
 	}
+	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
 
+	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 	/* create new fcport if fw has knowledge of new sessions */
 	for (i = 0; i < n; i++) {
 		port_id_t id;
@@ -710,18 +719,21 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 	ql_dbg(ql_dbg_disc, vha, 0x20d9,
 	    "Async-gnlist WWPN %8phC \n", fcport->port_name);
 
-	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+	spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
+	if (!list_empty(&fcport->gnl_entry)) {
+		spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
+		rval = QLA_SUCCESS;
+		goto done;
+	}
+
+	spin_lock(&vha->hw->tgt.sess_lock);
 	fcport->disc_state = DSC_GNL;
 	fcport->last_rscn_gen = fcport->rscn_gen;
 	fcport->last_login_gen = fcport->login_gen;
+	spin_unlock(&vha->hw->tgt.sess_lock);
 
 	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
-	if (vha->gnl.sent) {
-		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-		return QLA_SUCCESS;
-	}
-	vha->gnl.sent = 1;
-	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+	spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
 
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp)
@@ -1049,6 +1061,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
 	fc_port_t *fcport = ea->fcport;
 	struct port_database_24xx *pd;
 	struct srb *sp = ea->sp;
+	uint8_t	ls;
 
 	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
 
@@ -1061,7 +1074,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
 	if (fcport->disc_state == DSC_DELETE_PEND)
 		return;
 
-	switch (pd->current_login_state) {
+	if (fcport->fc4f_nvme)
+		ls = pd->current_login_state >> 4;
+	else
+		ls = pd->current_login_state & 0xf;
+
+	switch (ls) {
 	case PDS_PRLI_COMPLETE:
 		__qla24xx_parse_gpdb(vha, fcport, pd);
 		break;
@@ -1151,8 +1169,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 	if (fcport->scan_state != QLA_FCPORT_FOUND)
 		return 0;
 
-	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
-	    (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
+	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
 		return 0;
 
 	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
@@ -1527,6 +1546,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
 	srb_t *sp = ptr;
 	struct srb_iocb *abt = &sp->u.iocb_cmd;
 
+	del_timer(&sp->u.iocb_cmd.timer);
 	complete(&abt->u.abt.comp);
 }
 
@@ -1791,6 +1811,7 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 		qla2x00_mark_device_lost(vha, fcport, 1, 0);
 	qlt_logo_completion_handler(fcport, data[0]);
 	fcport->login_gen++;
+	fcport->flags &= ~FCF_ASYNC_ACTIVE;
 	return;
 }
 
@@ -1798,6 +1819,7 @@ void
 qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
+	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
 	if (data[0] == MBS_COMMAND_COMPLETE) {
 		qla2x00_update_fcport(vha, fcport);
 
@@ -1805,7 +1827,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
 	}
 
 	/* Retry login. */
-	fcport->flags &= ~FCF_ASYNC_SENT;
 	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
 		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 	else
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index afcb5567998a..5c5dcca4d1da 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -454,7 +454,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
 	ha->req_q_map[0] = req;
 	set_bit(0, ha->rsp_qid_map);
 	set_bit(0, ha->req_qid_map);
-	return 1;
+	return 0;
 
 fail_qpair_map:
 	kfree(ha->base_qpair);
@@ -471,6 +471,9 @@ fail_req_map:
 
 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 {
+	if (!ha->req_q_map)
+		return;
+
 	if (IS_QLAFX00(ha)) {
 		if (req && req->ring_fx00)
 			dma_free_coherent(&ha->pdev->dev,
@@ -481,14 +484,17 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 		(req->length + 1) * sizeof(request_t),
 		req->ring, req->dma);
 
-	if (req)
+	if (req) {
 		kfree(req->outstanding_cmds);
-
-	kfree(req);
+		kfree(req);
+	}
 }
 
 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
+	if (!ha->rsp_q_map)
+		return;
+
 	if (IS_QLAFX00(ha)) {
 		if (rsp && rsp->ring)
 			dma_free_coherent(&ha->pdev->dev,
@@ -499,7 +505,8 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
 		(rsp->length + 1) * sizeof(response_t),
 		rsp->ring, rsp->dma);
 	}
-	kfree(rsp);
+	if (rsp)
+		kfree(rsp);
 }
 
 static void qla2x00_free_queues(struct qla_hw_data *ha)
@@ -1723,6 +1730,8 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
 	struct qla_tgt_cmd *cmd;
 	uint8_t trace = 0;
 
+	if (!ha->req_q_map)
+		return;
 	spin_lock_irqsave(qp->qp_lock_ptr, flags);
 	req = qp->req;
 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
@@ -3095,14 +3104,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* Set up the irqs */
 	ret = qla2x00_request_irqs(ha, rsp);
 	if (ret)
-		goto probe_hw_failed;
+		goto probe_failed;
 
 	/* Alloc arrays of request and response ring ptrs */
-	if (!qla2x00_alloc_queues(ha, req, rsp)) {
+	if (qla2x00_alloc_queues(ha, req, rsp)) {
 		ql_log(ql_log_fatal, base_vha, 0x003d,
 		    "Failed to allocate memory for queue pointers..."
 		    "aborting.\n");
-		goto probe_init_failed;
+		goto probe_failed;
 	}
 
 	if (ha->mqenable && shost_use_blk_mq(host)) {
@@ -3387,15 +3396,6 @@ skip_dpc:
 
 	return 0;
 
-probe_init_failed:
-	qla2x00_free_req_que(ha, req);
-	ha->req_q_map[0] = NULL;
-	clear_bit(0, ha->req_qid_map);
-	qla2x00_free_rsp_que(ha, rsp);
-	ha->rsp_q_map[0] = NULL;
-	clear_bit(0, ha->rsp_qid_map);
-	ha->max_req_queues = ha->max_rsp_queues = 0;
-
 probe_failed:
 	if (base_vha->timer_active)
 		qla2x00_stop_timer(base_vha);
@@ -4508,11 +4508,17 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 	if (ha->init_cb)
 		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
 			ha->init_cb, ha->init_cb_dma);
-	vfree(ha->optrom_buffer);
-	kfree(ha->nvram);
-	kfree(ha->npiv_info);
-	kfree(ha->swl);
-	kfree(ha->loop_id_map);
+
+	if (ha->optrom_buffer)
+		vfree(ha->optrom_buffer);
+	if (ha->nvram)
+		kfree(ha->nvram);
+	if (ha->npiv_info)
+		kfree(ha->npiv_info);
+	if (ha->swl)
+		kfree(ha->swl);
+	if (ha->loop_id_map)
+		kfree(ha->loop_id_map);
 
 	ha->srb_mempool = NULL;
 	ha->ctx_mempool = NULL;
@@ -4528,6 +4534,15 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 	ha->ex_init_cb_dma = 0;
 	ha->async_pd = NULL;
 	ha->async_pd_dma = 0;
+	ha->loop_id_map = NULL;
+	ha->npiv_info = NULL;
+	ha->optrom_buffer = NULL;
+	ha->swl = NULL;
+	ha->nvram = NULL;
+	ha->mctp_dump = NULL;
+	ha->dcbx_tlv = NULL;
+	ha->xgmac_data = NULL;
+	ha->sfp_data = NULL;
 
 	ha->s_dma_pool = NULL;
 	ha->dl_dma_pool = NULL;
@@ -4577,6 +4592,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 
 	spin_lock_init(&vha->work_lock);
 	spin_lock_init(&vha->cmd_list_lock);
+	spin_lock_init(&vha->gnl.fcports_lock);
 	init_waitqueue_head(&vha->fcport_waitQ);
 	init_waitqueue_head(&vha->vref_waitq);
 
@@ -4806,9 +4822,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 		fcport->d_id = e->u.new_sess.id;
 		fcport->flags |= FCF_FABRIC_DEVICE;
 		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
-		if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI)
+		if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) {
 			fcport->fc4_type = FC4_TYPE_FCP_SCSI;
-
+		} else if (e->u.new_sess.fc4_type == FC4_TYPE_NVME) {
+			fcport->fc4_type = FC4_TYPE_OTHER;
+			fcport->fc4f_nvme = FC4_TYPE_NVME;
+		}
 		memcpy(fcport->port_name, e->u.new_sess.port_name,
 		    WWN_SIZE);
 	} else {
@@ -4877,6 +4896,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 		}
 		qlt_plogi_ack_unref(vha, pla);
 	} else {
+		fc_port_t *dfcp = NULL;
+
 		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 		tfcp = qla2x00_find_fcport_by_nportid(vha,
 		    &e->u.new_sess.id, 1);
@@ -4899,11 +4920,13 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 			default:
 				fcport->login_pause = 1;
 				tfcp->conflict = fcport;
-				qlt_schedule_sess_for_deletion(tfcp);
+				dfcp = tfcp;
 				break;
 			}
 		}
 		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+		if (dfcp)
+			qlt_schedule_sess_for_deletion(tfcp);
 
 		wwn = wwn_to_u64(fcport->node_name);
 
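
Note: the first qla_os.c hunk flips qla2x00_alloc_queues() from returning 1 on success to the kernel's usual 0-on-success, nonzero-on-error convention, and the probe path's call site is inverted to match. A tiny sketch of that convention change (the function name and error code below are invented for illustration):

/* sketch: 0-on-success convention, so `if (f())` reads "if it failed" */
#include <stdio.h>

static int alloc_queues(int fail)
{
    if (fail)
        return -12;     /* -ENOMEM-style error code */
    return 0;           /* success is 0, as the new caller expects */
}

int main(void)
{
    if (alloc_queues(0))
        fprintf(stderr, "alloc failed\n");
    else
        printf("alloc ok\n");
    return 0;
}
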
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 896b2d8bd803..b49ac85f3de2 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1224,10 +1224,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
 	}
 }
 
-/* ha->tgt.sess_lock supposed to be held on entry */
 void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 {
 	struct qla_tgt *tgt = sess->tgt;
+	struct qla_hw_data *ha = sess->vha->hw;
 	unsigned long flags;
 
 	if (sess->disc_state == DSC_DELETE_PEND)
@@ -1244,16 +1244,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 		return;
 	}
 
+	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	if (sess->deleted == QLA_SESS_DELETED)
 		sess->logout_on_delete = 0;
 
-	spin_lock_irqsave(&sess->vha->work_lock, flags);
 	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
-		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 		return;
 	}
 	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-	spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 
 	sess->disc_state = DSC_DELETE_PEND;
 
@@ -1262,13 +1262,10 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
 	    "Scheduling sess %p for deletion\n", sess);
 
-	/* use cancel to push work element through before re-queue */
-	cancel_work_sync(&sess->del_work);
 	INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
-	queue_work(sess->vha->hw->wq, &sess->del_work);
+	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
 }
 
-/* ha->tgt.sess_lock supposed to be held on entry */
 static void qlt_clear_tgt_db(struct qla_tgt *tgt)
 {
 	struct fc_port *sess;
@@ -1451,8 +1448,8 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
 
 	sess->local = 1;
-	qlt_schedule_sess_for_deletion(sess);
 	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+	qlt_schedule_sess_for_deletion(sess);
 }
 
 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1512,10 +1509,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
 	 * Lock is needed, because we still can get an incoming packet.
 	 */
 	mutex_lock(&vha->vha_tgt.tgt_mutex);
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	tgt->tgt_stop = 1;
 	qlt_clear_tgt_db(tgt);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 	mutex_unlock(&vha->vha_tgt.tgt_mutex);
 	mutex_unlock(&qla_tgt_mutex);
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d042915ce895..ca53a5f785ee 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -223,7 +223,8 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
 
 static void scsi_eh_inc_host_failed(struct rcu_head *head)
 {
-	struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
+	struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
+	struct Scsi_Host *shost = scmd->device->host;
 	unsigned long flags;
 
 	spin_lock_irqsave(shost->host_lock, flags);
@@ -259,7 +260,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
 	 * Ensure that all tasks observe the host state change before the
 	 * host_failed change.
 	 */
-	call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
+	call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
 }
 
 /**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a86df9ca7d1c..c84f931388f2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -671,6 +671,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
 	if (!blk_rq_is_scsi(req)) {
 		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
 		cmd->flags &= ~SCMD_INITIALIZED;
+		destroy_rcu_head(&cmd->rcu);
 	}
 
 	if (req->mq_ctx) {
@@ -720,6 +721,8 @@ static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
 		int result)
 {
 	switch (host_byte(result)) {
+	case DID_OK:
+		return BLK_STS_OK;
 	case DID_TRANSPORT_FAILFAST:
 		return BLK_STS_TRANSPORT;
 	case DID_TARGET_FAILURE:
@@ -1151,6 +1154,7 @@ static void scsi_initialize_rq(struct request *rq)
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
 	scsi_req_init(&cmd->req);
+	init_rcu_head(&cmd->rcu);
 	cmd->jiffies_at_alloc = jiffies;
 	cmd->retries = 0;
 }
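
Note: the hosts.c, scsi_error.c, and scsi_lib.c hunks together move the rcu_head from the one-per-host Scsi_Host into each scsi_cmnd, so two commands failing back to back can no longer queue the same head twice; the RCU callback then recovers its owning command with container_of(). A hedged userspace sketch of that embedded-head pattern (cb_head, command, and done() are made-up names; a plain function pointer stands in for call_rcu):

/* sketch: recover the owning object from an embedded callback head */
#include <stddef.h>
#include <stdio.h>

struct cb_head { void (*func)(struct cb_head *); };

struct command {
    int id;
    struct cb_head rcu;     /* one head per command, never shared */
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void done(struct cb_head *head)
{
    struct command *cmd = container_of(head, struct command, rcu);
    printf("completed command %d\n", cmd->id);
}

int main(void)
{
    struct command a = { .id = 42 };
    a.rcu.func = done;
    a.rcu.func(&a.rcu);     /* stands in for the deferred RCU callback */
    return 0;
}
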
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bff21e636ddd..3541caf3fceb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2595,6 +2595,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
 	int res;
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_mode_data data;
+	int disk_ro = get_disk_ro(sdkp->disk);
 	int old_wp = sdkp->write_prot;
 
 	set_disk_ro(sdkp->disk, 0);
@@ -2635,7 +2636,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
 			  "Test WP failed, assume Write Enabled\n");
 	} else {
 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
-		set_disk_ro(sdkp->disk, sdkp->write_prot);
+		set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
 		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
 			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
 				  sdkp->write_prot ? "on" : "off");
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6c348a211ebb..89cf4498f535 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -403,7 +403,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
  */
 static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 {
-	u64 zone_blocks;
+	u64 zone_blocks = 0;
 	sector_t block = 0;
 	unsigned char *buf;
 	unsigned char *rec;
@@ -421,10 +421,8 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 
 	/* Do a report zone to get the same field */
 	ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
-	if (ret) {
-		zone_blocks = 0;
-		goto out;
-	}
+	if (ret)
+		goto out_free;
 
 	same = buf[4] & 0x0f;
 	if (same > 0) {
@@ -464,7 +462,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 			ret = sd_zbc_report_zones(sdkp, buf,
 						  SD_ZBC_BUF_SIZE, block);
 			if (ret)
-				return ret;
+				goto out_free;
 		}
 
 	} while (block < sdkp->capacity);
@@ -472,35 +470,32 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 		zone_blocks = sdkp->zone_blocks;
 
 out:
-	kfree(buf);
-
 	if (!zone_blocks) {
 		if (sdkp->first_scan)
 			sd_printk(KERN_NOTICE, sdkp,
 				  "Devices with non constant zone "
 				  "size are not supported\n");
-		return -ENODEV;
-	}
-
-	if (!is_power_of_2(zone_blocks)) {
+		ret = -ENODEV;
+	} else if (!is_power_of_2(zone_blocks)) {
 		if (sdkp->first_scan)
 			sd_printk(KERN_NOTICE, sdkp,
 				  "Devices with non power of 2 zone "
 				  "size are not supported\n");
-		return -ENODEV;
-	}
-
-	if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+		ret = -ENODEV;
+	} else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
 		if (sdkp->first_scan)
 			sd_printk(KERN_NOTICE, sdkp,
 				  "Zone size too large\n");
-		return -ENODEV;
+		ret = -ENODEV;
+	} else {
+		sdkp->zone_blocks = zone_blocks;
+		sdkp->zone_shift = ilog2(zone_blocks);
 	}
 
-	sdkp->zone_blocks = zone_blocks;
-	sdkp->zone_shift = ilog2(zone_blocks);
+out_free:
+	kfree(buf);
 
-	return 0;
+	return ret;
 }
 
 /**
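
Note: the sd_zbc.c rework funnels every exit through a single out/out_free tail so the report buffer is freed on each path, including the early-error returns that previously leaked it. A small sketch of that single-exit shape (check_zones() and its constants are illustrative only, not the driver's code):

/* sketch: one cleanup point, `ret` carries the verdict */
#include <stdlib.h>
#include <stdio.h>

static int check_zones(void)
{
    int ret = 0;
    unsigned long zone_blocks = 0;
    char *buf = malloc(4096);       /* stands in for the report buffer */

    if (!buf)
        return -1;                  /* nothing allocated yet */

    /* ... a probe loop would set zone_blocks or leave it 0 ... */
    zone_blocks = 256;

    if (!zone_blocks)
        ret = -2;                   /* unsupported layout */
    /* else: commit zone_blocks to device state here */

    free(buf);                      /* runs on every path past malloc */
    return ret;
}

int main(void)
{
    printf("check_zones() = %d\n", check_zones());
    return 0;
}
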
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 6be5ab32c94f..8c51d628b52e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1311,7 +1311,8 @@ static int storvsc_do_io(struct hv_device *device,
 			 */
 			cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
 				    cpumask_of_node(cpu_to_node(q_num)));
-			for_each_cpu(tgt_cpu, &alloced_mask) {
+			for_each_cpu_wrap(tgt_cpu, &alloced_mask,
+					outgoing_channel->target_cpu + 1) {
 				if (tgt_cpu != outgoing_channel->target_cpu) {
 					outgoing_channel =
 					stor_device->stor_chns[tgt_cpu];
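
Note: switching from for_each_cpu() to for_each_cpu_wrap() above starts the scan just past the current channel's CPU and wraps around, so channel selection rotates instead of always landing on the lowest set bit. A hedged userspace sketch of that wrap-around iteration over a bitmask (the mask and start values are invented):

/* sketch: wrap-around scan of a CPU-style bitmask */
#include <stdio.h>

#define NBITS 16

int main(void)
{
    unsigned int mask = 0x8421;     /* bits 0, 5, 10, 15 set */
    int start = 6;                  /* current channel's CPU + 1 */

    for (int i = 0; i < NBITS; i++) {
        int cpu = (start + i) % NBITS;
        if (mask & (1u << cpu)) {
            /* a plain forward scan would always yield 0 here */
            printf("first candidate after wrap: %d\n", cpu);
            break;
        }
    }
    return 0;
}
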
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index cfb42f5eccb2..750f93197411 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -470,13 +470,21 @@ static int imx_gpc_probe(struct platform_device *pdev)
 
 static int imx_gpc_remove(struct platform_device *pdev)
 {
+	struct device_node *pgc_node;
 	int ret;
 
+	pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
+
+	/* bail out if DT too old and doesn't provide the necessary info */
+	if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
+	    !pgc_node)
+		return 0;
+
 	/*
 	 * If the old DT binding is used the toplevel driver needs to
 	 * de-register the power domains
 	 */
-	if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) {
+	if (!pgc_node) {
 		of_genpd_del_provider(pdev->dev.of_node);
 
 		ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 6dbba5aff191..86580b6df33d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -326,24 +326,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
 	mutex_lock(&ashmem_mutex);
 
 	if (asma->size == 0) {
-		ret = -EINVAL;
-		goto out;
+		mutex_unlock(&ashmem_mutex);
+		return -EINVAL;
 	}
 
 	if (!asma->file) {
-		ret = -EBADF;
-		goto out;
+		mutex_unlock(&ashmem_mutex);
+		return -EBADF;
 	}
 
+	mutex_unlock(&ashmem_mutex);
+
 	ret = vfs_llseek(asma->file, offset, origin);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	/** Copy f_pos from backing file, since f_ops->llseek() sets it */
 	file->f_pos = asma->file->f_pos;
-
-out:
-	mutex_unlock(&ashmem_mutex);
 	return ret;
 }
 
@@ -702,16 +701,14 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
 	size_t pgstart, pgend;
 	int ret = -EINVAL;
 
+	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+		return -EFAULT;
+
 	mutex_lock(&ashmem_mutex);
 
 	if (unlikely(!asma->file))
 		goto out_unlock;
 
-	if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
-		ret = -EFAULT;
-		goto out_unlock;
-	}
-
 	/* per custom, you can pass zero for len to mean "everything onward" */
 	if (!pin.len)
 		pin.len = PAGE_ALIGN(asma->size) - pin.offset;
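
Note: both ashmem hunks shrink how long ashmem_mutex is held and, in ashmem_pin_unpin(), hoist the fault-prone copy_from_user() above the lock, so a page fault taken during the copy can never wait on a path that itself needs ashmem_mutex. A hedged userspace sketch of that copy-first, lock-second ordering (pin_req, area_lock, and memcpy standing in for copy_from_user are all illustrative):

/* sketch: do the fault-prone copy first, take the lock second */
#include <pthread.h>
#include <string.h>

struct pin_req { unsigned long offset, len; };
static pthread_mutex_t area_lock = PTHREAD_MUTEX_INITIALIZER;

static int pin_unpin(const void *user_ptr)
{
    struct pin_req pin;

    /* may "fault" (block on memory) -- done before locking, so the
     * fault path can never wait on area_lock while we hold it */
    memcpy(&pin, user_ptr, sizeof(pin));

    pthread_mutex_lock(&area_lock);
    /* ... validate pin.offset / pin.len against the area ... */
    pthread_mutex_unlock(&area_lock);
    return 0;
}

int main(void)
{
    struct pin_req r = { 0, 4096 };
    return pin_unpin(&r);
}
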
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e618a87521a3..9d733471ca2e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -475,8 +475,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
 	struct comedi_cmd *cmd = &async->cmd;
 
 	if (cmd->stop_src == TRIG_COUNT) {
-		unsigned int nscans = nsamples / cmd->scan_end_arg;
-		unsigned int scans_left = __comedi_nscans_left(s, nscans);
+		unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg);
 		unsigned int scan_pos =
 		    comedi_bytes_to_samples(s, async->scan_progress);
 		unsigned long long samples_left = 0;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5c0e59e8fe46..cbe98bc2b998 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2180,6 +2180,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
 		}
 		if (tty_hung_up_p(file))
 			break;
+		/*
+		 * Abort readers for ttys which never actually
+		 * get hung up. See __tty_hangup().
+		 */
+		if (test_bit(TTY_HUPPING, &tty->flags))
+			break;
 		if (!timeout)
 			break;
 		if (file->f_flags & O_NONBLOCK) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 881730cd48c1..3296a05cda2d 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3384,11 +3384,9 @@ static int serial_pci_is_class_communication(struct pci_dev *dev)
 	/*
 	 * If it is not a communications device or the programming
 	 * interface is greater than 6, give up.
-	 *
-	 * (Should we try to make guesses for multiport serial devices
-	 * later?)
 	 */
 	if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) &&
+	     ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MULTISERIAL) &&
 	     ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) ||
 	    (dev->class & 0xff) > 6)
 		return -ENODEV;
@@ -3425,6 +3423,12 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
 {
 	int num_iomem, num_port, first_port = -1, i;
 
+	/*
+	 * Should we try to make guesses for multiport serial devices later?
+	 */
+	if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_MULTISERIAL)
+		return -ENODEV;
+
 	num_iomem = num_port = 0;
 	for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
 		if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
@@ -4696,6 +4700,17 @@ static const struct pci_device_id serial_pci_tbl[] = {
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,	/* 135a.0dc0 */
 		pbn_b2_4_115200 },
 	/*
+	 * BrainBoxes UC-260
+	 */
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0D21,
+		PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+		pbn_b2_4_115200 },
+	{	PCI_VENDOR_ID_INTASHIELD, 0x0E34,
+		PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+		pbn_b2_4_115200 },
+	/*
 	 * Perle PCI-RAS cards
 	 */
 	{	PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index df46a9e88c34..e287fe8f10fc 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1734,6 +1734,7 @@ static void atmel_get_ip_name(struct uart_port *port)
 		switch (version) {
 		case 0x302:
 		case 0x10213:
+		case 0x10302:
 			dev_dbg(port->dev, "This version is usart\n");
 			atmel_port->has_frac_baudrate = true;
 			atmel_port->has_hw_timer = true;
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 870e84fb6e39..a24278380fec 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -245,11 +245,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
 	}
 	port->mapbase = addr;
 	port->uartclk = BASE_BAUD * 16;
-	port->membase = earlycon_map(port->mapbase, SZ_4K);
 
 	val = of_get_flat_dt_prop(node, "reg-offset", NULL);
 	if (val)
 		port->mapbase += be32_to_cpu(*val);
+	port->membase = earlycon_map(port->mapbase, SZ_4K);
+
 	val = of_get_flat_dt_prop(node, "reg-shift", NULL);
 	if (val)
 		port->regshift = be32_to_cpu(*val);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 552fd050f2bb..91f3a1a5cb7f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2262,7 +2262,7 @@ static int imx_uart_probe(struct platform_device *pdev)
 	uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);
 
 	if (sport->port.rs485.flags & SER_RS485_ENABLED &&
-	    (!sport->have_rtscts || !sport->have_rtsgpio))
+	    (!sport->have_rtscts && !sport->have_rtsgpio))
 		dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
 
 	imx_uart_rs485_config(&sport->port, &sport->port.rs485);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index c8dde56b532b..35b9201db3b4 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1144,6 +1144,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
 		uport->ops->config_port(uport, flags);
 
 		ret = uart_startup(tty, state, 1);
+		if (ret == 0)
+			tty_port_set_initialized(port, true);
 		if (ret > 0)
 			ret = 0;
 	}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 0ec2d938011d..fdbbff547106 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -886,6 +886,8 @@ static void sci_receive_chars(struct uart_port *port)
 		/* Tell the rest of the system the news. New characters! */
 		tty_flip_buffer_push(tport);
 	} else {
+		/* TTY buffers full; read from RX reg to prevent lockup */
+		serial_port_in(port, SCxRDR);
 		serial_port_in(port, SCxSR); /* dummy read */
 		sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
 	}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index eb9133b472f4..63114ea35ec1 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -586,6 +586,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
 		return;
 	}
 
+	/*
+	 * Some console devices aren't actually hung up for technical and
+	 * historical reasons, which can lead to indefinite interruptible
+	 * sleep in n_tty_read(). The following explicitly tells
+	 * n_tty_read() to abort readers.
+	 */
+	set_bit(TTY_HUPPING, &tty->flags);
+
 	/* inuse_filps is protected by the single tty lock,
 	   this really needs to change if we want to flush the
 	   workqueue with the lock held */
@@ -640,6 +648,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
 	 * from the ldisc side, which is now guaranteed.
 	 */
 	set_bit(TTY_HUPPED, &tty->flags);
+	clear_bit(TTY_HUPPING, &tty->flags);
 	tty_unlock(tty);
 
 	if (f)
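
Note: the n_tty.c and tty_io.c hunks cooperate: __tty_hangup() raises a transient TTY_HUPPING bit before teardown and clears it once TTY_HUPPED is set, while blocked readers poll the bit and bail out, covering consoles that never deliver a real hangup. A hedged userspace sketch of that transient-flag pairing (the names and atomics below are illustrative, not the TTY API):

/* sketch: transient "hupping" flag that unblocks readers */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int hupping, hupped;

static int reader_should_abort(void)
{
    /* readers that never see a real hangup still observe this */
    return atomic_load(&hupping) || atomic_load(&hupped);
}

static void hangup(void)
{
    atomic_store(&hupping, 1);  /* tell readers to abort now */
    /* ... flush work, close the line discipline, wake waiters ... */
    atomic_store(&hupped, 1);   /* hangup fully done */
    atomic_store(&hupping, 0);  /* transient window over */
}

int main(void)
{
    hangup();
    printf("abort? %d\n", reader_should_abort());
    return 0;
}
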
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index c64cf6c4a83d..0c11d40a12bc 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -151,6 +151,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
 
 	ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
 
+	/* Linger a bit, prior to the next control message. */
+	if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
+		msleep(200);
+
 	kfree(dr);
 
 	return ret;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f4a548471f0f..54b019e267c5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -230,7 +230,8 @@ static const struct usb_device_id usb_quirk_list[] = {
 	{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
 
 	/* Corsair Strafe RGB */
-	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
+	  USB_QUIRK_DELAY_CTRL_MSG },
 
 	/* Corsair K70 LUX */
 	{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
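Together, these two hunks add a per-device quirk: usb_control_msg() now consults the device's quirk bitmap after each control transfer and lingers 200 ms when USB_QUIRK_DELAY_CTRL_MSG is set, and the Corsair Strafe RGB table entry opts in. A minimal sketch of quirk-bit gating (the flag values here are illustrative, not the kernel's real USB_QUIRK_* encoding, and usleep() stands in for msleep()):

#include <stdio.h>
#include <unistd.h>

#define QUIRK_DELAY_INIT     (1u << 0) /* made-up bit positions */
#define QUIRK_DELAY_CTRL_MSG (1u << 1)

static void after_control_msg(unsigned int quirks)
{
    if (quirks & QUIRK_DELAY_CTRL_MSG)
        usleep(200 * 1000); /* linger, as the driver does with msleep(200) */
}

int main(void)
{
    unsigned int strafe_rgb = QUIRK_DELAY_INIT | QUIRK_DELAY_CTRL_MSG;

    after_control_msg(strafe_rgb); /* pauses 200 ms before the next message */
    puts("done");
    return 0;
}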
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 03fd20f0b496..c4a47496d2fb 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -137,7 +137,7 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
 	p->activate_stm_fs_transceiver = true;
 }
 
-static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg)
+static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg)
 {
 	struct dwc2_core_params *p = &hsotg->params;
 
@@ -164,8 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = {
 	{ .compatible = "st,stm32f4x9-fsotg",
 	  .data = dwc2_set_stm32f4x9_fsotg_params },
 	{ .compatible = "st,stm32f4x9-hsotg" },
-	{ .compatible = "st,stm32f7xx-hsotg",
-	  .data = dwc2_set_stm32f7xx_hsotg_params },
+	{ .compatible = "st,stm32f7-hsotg",
+	  .data = dwc2_set_stm32f7_hsotg_params },
 	{},
 };
 MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f1d838a4acd6..e94bf91cc58a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -175,7 +175,7 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
 	dwc->desired_dr_role = mode;
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
-	queue_work(system_power_efficient_wq, &dwc->drd_work);
+	queue_work(system_freezable_wq, &dwc->drd_work);
 }
 
 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index c2592d883f67..d2428a9e8900 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1538,7 +1538,6 @@ ffs_fs_kill_sb(struct super_block *sb)
 	if (sb->s_fs_info) {
 		ffs_release_dev(sb->s_fs_info);
 		ffs_data_closed(sb->s_fs_info);
-		ffs_data_put(sb->s_fs_info);
 	}
 }
 
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 84f88fa411cd..d088c340e4d0 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -447,7 +447,8 @@ static int ohci_init (struct ohci_hcd *ohci)
 	struct usb_hcd *hcd = ohci_to_hcd(ohci);
 
 	/* Accept arbitrarily long scatter-gather lists */
-	hcd->self.sg_tablesize = ~0;
+	if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+		hcd->self.sg_tablesize = ~0;
 
 	if (distrust_firmware)
 		ohci->flags |= OHCI_QUIRK_HUB_POWER;
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index a1ab8acf39ba..c359bae7b754 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -328,13 +328,14 @@ dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
 int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
 		 gfp_t gfp_flags)
 {
+	unsigned long		flags;
 	struct xhci_dbc		*dbc = dep->dbc;
 	int			ret = -ESHUTDOWN;
 
-	spin_lock(&dbc->lock);
+	spin_lock_irqsave(&dbc->lock, flags);
 	if (dbc->state == DS_CONFIGURED)
 		ret = dbc_ep_do_queue(dep, req);
-	spin_unlock(&dbc->lock);
+	spin_unlock_irqrestore(&dbc->lock, flags);
 
 	mod_delayed_work(system_wq, &dbc->event_work, 0);
 
@@ -521,15 +522,16 @@ static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
 static int xhci_dbc_start(struct xhci_hcd *xhci)
 {
 	int			ret;
+	unsigned long		flags;
 	struct xhci_dbc		*dbc = xhci->dbc;
 
 	WARN_ON(!dbc);
 
 	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);
 
-	spin_lock(&dbc->lock);
+	spin_lock_irqsave(&dbc->lock, flags);
 	ret = xhci_do_dbc_start(xhci);
-	spin_unlock(&dbc->lock);
+	spin_unlock_irqrestore(&dbc->lock, flags);
 
 	if (ret) {
 		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
@@ -541,6 +543,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
 
 static void xhci_dbc_stop(struct xhci_hcd *xhci)
 {
+	unsigned long		flags;
 	struct xhci_dbc		*dbc = xhci->dbc;
 	struct dbc_port		*port = &dbc->port;
 
@@ -551,9 +554,9 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
 	if (port->registered)
 		xhci_dbc_tty_unregister_device(xhci);
 
-	spin_lock(&dbc->lock);
+	spin_lock_irqsave(&dbc->lock, flags);
 	xhci_do_dbc_stop(xhci);
-	spin_unlock(&dbc->lock);
+	spin_unlock_irqrestore(&dbc->lock, flags);
 
 	pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
 }
@@ -779,14 +782,15 @@ static void xhci_dbc_handle_events(struct work_struct *work)
 	int			ret;
 	enum evtreturn		evtr;
 	struct xhci_dbc		*dbc;
+	unsigned long		flags;
 	struct xhci_hcd		*xhci;
 
 	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
 	xhci = dbc->xhci;
 
-	spin_lock(&dbc->lock);
+	spin_lock_irqsave(&dbc->lock, flags);
 	evtr = xhci_dbc_do_handle_events(dbc);
-	spin_unlock(&dbc->lock);
+	spin_unlock_irqrestore(&dbc->lock, flags);
 
 	switch (evtr) {
 	case EVT_GSER:
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index 8d47b6fbf973..75f0b92694ba 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -92,21 +92,23 @@ static void dbc_start_rx(struct dbc_port *port)
 static void
 dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
 {
+	unsigned long		flags;
 	struct xhci_dbc		*dbc = xhci->dbc;
 	struct dbc_port		*port = &dbc->port;
 
-	spin_lock(&port->port_lock);
+	spin_lock_irqsave(&port->port_lock, flags);
 	list_add_tail(&req->list_pool, &port->read_queue);
 	tasklet_schedule(&port->push);
-	spin_unlock(&port->port_lock);
+	spin_unlock_irqrestore(&port->port_lock, flags);
 }
 
 static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
 {
+	unsigned long		flags;
 	struct xhci_dbc		*dbc = xhci->dbc;
 	struct dbc_port		*port = &dbc->port;
 
-	spin_lock(&port->port_lock);
+	spin_lock_irqsave(&port->port_lock, flags);
 	list_add(&req->list_pool, &port->write_pool);
 	switch (req->status) {
 	case 0:
@@ -119,7 +121,7 @@ static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
 			  req->status);
 		break;
 	}
-	spin_unlock(&port->port_lock);
+	spin_unlock_irqrestore(&port->port_lock, flags);
 }
 
 static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
@@ -327,12 +329,13 @@ static void dbc_rx_push(unsigned long _port)
 {
 	struct dbc_request	*req;
 	struct tty_struct	*tty;
+	unsigned long		flags;
 	bool			do_push = false;
 	bool			disconnect = false;
 	struct dbc_port		*port = (void *)_port;
 	struct list_head	*queue = &port->read_queue;
 
-	spin_lock_irq(&port->port_lock);
+	spin_lock_irqsave(&port->port_lock, flags);
 	tty = port->port.tty;
 	while (!list_empty(queue)) {
 		req = list_first_entry(queue, struct dbc_request, list_pool);
@@ -392,16 +395,17 @@ static void dbc_rx_push(unsigned long _port)
 	if (!disconnect)
 		dbc_start_rx(port);
 
-	spin_unlock_irq(&port->port_lock);
+	spin_unlock_irqrestore(&port->port_lock, flags);
 }
 
 static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
 {
+	unsigned long	flags;
 	struct dbc_port		*port = container_of(_port, struct dbc_port, port);
 
-	spin_lock_irq(&port->port_lock);
+	spin_lock_irqsave(&port->port_lock, flags);
 	dbc_start_rx(port);
-	spin_unlock_irq(&port->port_lock);
+	spin_unlock_irqrestore(&port->port_lock, flags);
 
 	return 0;
 }
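All of the xhci-dbgcap.c and xhci-dbgtty.c hunks make the same conversion: plain spin_lock()/spin_lock_irq() becomes spin_lock_irqsave(), because these helpers can be entered with interrupts either enabled or already disabled, and an unconditional spin_unlock_irq() would re-enable interrupts behind the caller's back. A toy model of why saving and restoring the prior state is the safe pattern (a global bool stands in for the CPU interrupt flag; illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;

static unsigned long my_lock_irqsave(void)
{
    unsigned long flags = irqs_enabled; /* remember the caller's state */
    irqs_enabled = false;               /* disable across critical section */
    return flags;
}

static void my_unlock_irqrestore(unsigned long flags)
{
    irqs_enabled = flags;               /* restore, never blindly re-enable */
}

int main(void)
{
    irqs_enabled = false;               /* caller already runs IRQ-off */
    unsigned long flags = my_lock_irqsave();
    /* ... critical section ... */
    my_unlock_irqrestore(flags);
    printf("irqs_enabled=%d (still off, as the caller expects)\n",
           irqs_enabled);
    return 0;
}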
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5262fa571a5d..d9f831b67e57 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -126,6 +126,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
 		xhci->quirks |= XHCI_AMD_PLL_FIX;
 
+	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb)
+		xhci->quirks |= XHCI_SUSPEND_DELAY;
+
 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6f038306c14d..6652e2d5bd2e 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -360,7 +360,6 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
 {
 	struct usb_hcd	*hcd = dev_get_drvdata(dev);
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
-	int ret;
 
 	/*
 	 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
@@ -370,12 +369,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
 	 * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
 	 * also applies to runtime suspend.
 	 */
-	ret = xhci_suspend(xhci, device_may_wakeup(dev));
-
-	if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
-		clk_disable_unprepare(xhci->clk);
-
-	return ret;
+	return xhci_suspend(xhci, device_may_wakeup(dev));
 }
 
 static int __maybe_unused xhci_plat_resume(struct device *dev)
@@ -384,9 +378,6 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 	int ret;
 
-	if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
-		clk_prepare_enable(xhci->clk);
-
 	ret = xhci_priv_resume_quirk(hcd);
 	if (ret)
 		return ret;
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index f0b559660007..f33ffc2bc4ed 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -83,6 +83,10 @@ static const struct soc_device_attribute rcar_quirks_match[] = {
 		.soc_id = "r8a7796",
 		.data = (void *)RCAR_XHCI_FIRMWARE_V3,
 	},
+	{
+		.soc_id = "r8a77965",
+		.data = (void *)RCAR_XHCI_FIRMWARE_V3,
+	},
 	{ /* sentinel */ },
 };
 
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 25d4b748a56f..5d37700ae4b0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -877,6 +877,9 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
 	del_timer_sync(&xhci->shared_hcd->rh_timer);
 
+	if (xhci->quirks & XHCI_SUSPEND_DELAY)
+		usleep_range(1000, 1500);
+
 	spin_lock_irq(&xhci->lock);
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e4d7d3d06a75..866e141d4972 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -718,11 +718,12 @@ struct xhci_ep_ctx {
 /* bits 10:14 are Max Primary Streams */
 /* bit 15 is Linear Stream Array */
 /* Interval - period between requests to an endpoint - 125u increments. */
 #define EP_INTERVAL(p)			(((p) & 0xff) << 16)
 #define EP_INTERVAL_TO_UFRAMES(p)	(1 << (((p) >> 16) & 0xff))
 #define CTX_TO_EP_INTERVAL(p)		(((p) >> 16) & 0xff)
 #define EP_MAXPSTREAMS_MASK		(0x1f << 10)
 #define EP_MAXPSTREAMS(p)		(((p) << 10) & EP_MAXPSTREAMS_MASK)
+#define CTX_TO_EP_MAXPSTREAMS(p)	(((p) & EP_MAXPSTREAMS_MASK) >> 10)
 /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
 #define	EP_HAS_LSA		(1 << 15)
 /* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
@@ -1825,6 +1826,7 @@ struct xhci_hcd {
 #define XHCI_U2_DISABLE_WAKE	(1 << 27)
 #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL	(1 << 28)
 #define XHCI_HW_LPM_DISABLE	(1 << 29)
+#define XHCI_SUSPEND_DELAY	(1 << 30)
 
 	unsigned int		num_active_eps;
 	unsigned int		limit_active_eps;
@@ -2549,21 +2551,22 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
 	u8 burst;
 	u8 cerr;
 	u8 mult;
-	u8 lsa;
-	u8 hid;
+
+	bool lsa;
+	bool hid;
 
 	esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
 		CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
 
 	ep_state = info & EP_STATE_MASK;
-	max_pstr = info & EP_MAXPSTREAMS_MASK;
+	max_pstr = CTX_TO_EP_MAXPSTREAMS(info);
 	interval = CTX_TO_EP_INTERVAL(info);
 	mult = CTX_TO_EP_MULT(info) + 1;
-	lsa = info & EP_HAS_LSA;
+	lsa = !!(info & EP_HAS_LSA);
 
 	cerr = (info2 & (3 << 1)) >> 1;
 	ep_type = CTX_TO_EP_TYPE(info2);
-	hid = info2 & (1 << 7);
+	hid = !!(info2 & (1 << 7));
 	burst = CTX_TO_MAX_BURST(info2);
 	maxp = MAX_PACKET_DECODED(info2);
 
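The switch from u8 to bool (plus the added !! normalization) above is not cosmetic: EP_HAS_LSA is bit 15, so storing info & (1 << 15) into a u8 keeps only the low eight bits and always yields 0, and the decoder mis-reported the flag. A compilable demonstration of the truncation (standalone, using the same mask value):

#include <stdint.h>
#include <stdio.h>

#define EP_HAS_LSA (1 << 15)

int main(void)
{
    uint32_t info = EP_HAS_LSA;             /* the flag is set */

    uint8_t broken = info & EP_HAS_LSA;     /* bit 15 truncated away: 0 */
    uint8_t fixed = !!(info & EP_HAS_LSA);  /* normalized to 0/1 first: 1 */

    printf("broken=%u fixed=%u\n", broken, fixed); /* broken=0 fixed=1 */
    return 0;
}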
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index f5e1bb5e5217..984f7e12a6a5 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -85,6 +85,8 @@ struct mon_reader_text {
 
 	wait_queue_head_t wait;
 	int printf_size;
+	size_t printf_offset;
+	size_t printf_togo;
 	char *printf_buf;
 	struct mutex printf_lock;
 
@@ -376,75 +378,103 @@ err_alloc:
 	return rc;
 }
 
-/*
- * For simplicity, we read one record in one system call and throw out
- * what does not fit. This means that the following does not work:
- *   dd if=/dbg/usbmon/0t bs=10
- * Also, we do not allow seeks and do not bother advancing the offset.
- */
+static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
+    char __user * const buf, const size_t nbytes)
+{
+	const size_t togo = min(nbytes, rp->printf_togo);
+
+	if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
+		return -EFAULT;
+	rp->printf_togo -= togo;
+	rp->printf_offset += togo;
+	return togo;
+}
+
+/* ppos is not advanced since the llseek operation is not permitted. */
 static ssize_t mon_text_read_t(struct file *file, char __user *buf,
     size_t nbytes, loff_t *ppos)
 {
 	struct mon_reader_text *rp = file->private_data;
 	struct mon_event_text *ep;
 	struct mon_text_ptr ptr;
+	ssize_t ret;
 
-	ep = mon_text_read_wait(rp, file);
-	if (IS_ERR(ep))
-		return PTR_ERR(ep);
 	mutex_lock(&rp->printf_lock);
-	ptr.cnt = 0;
-	ptr.pbuf = rp->printf_buf;
-	ptr.limit = rp->printf_size;
-
-	mon_text_read_head_t(rp, &ptr, ep);
-	mon_text_read_statset(rp, &ptr, ep);
-	ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
-	    " %d", ep->length);
-	mon_text_read_data(rp, &ptr, ep);
-
-	if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
-		ptr.cnt = -EFAULT;
+
+	if (rp->printf_togo == 0) {
+
+		ep = mon_text_read_wait(rp, file);
+		if (IS_ERR(ep)) {
+			mutex_unlock(&rp->printf_lock);
+			return PTR_ERR(ep);
+		}
+		ptr.cnt = 0;
+		ptr.pbuf = rp->printf_buf;
+		ptr.limit = rp->printf_size;
+
+		mon_text_read_head_t(rp, &ptr, ep);
+		mon_text_read_statset(rp, &ptr, ep);
+		ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+		    " %d", ep->length);
+		mon_text_read_data(rp, &ptr, ep);
+
+		rp->printf_togo = ptr.cnt;
+		rp->printf_offset = 0;
+
+		kmem_cache_free(rp->e_slab, ep);
+	}
+
+	ret = mon_text_copy_to_user(rp, buf, nbytes);
 	mutex_unlock(&rp->printf_lock);
-	kmem_cache_free(rp->e_slab, ep);
-	return ptr.cnt;
+	return ret;
 }
 
+/* ppos is not advanced since the llseek operation is not permitted. */
 static ssize_t mon_text_read_u(struct file *file, char __user *buf,
     size_t nbytes, loff_t *ppos)
 {
 	struct mon_reader_text *rp = file->private_data;
 	struct mon_event_text *ep;
 	struct mon_text_ptr ptr;
+	ssize_t ret;
 
-	ep = mon_text_read_wait(rp, file);
-	if (IS_ERR(ep))
-		return PTR_ERR(ep);
 	mutex_lock(&rp->printf_lock);
-	ptr.cnt = 0;
-	ptr.pbuf = rp->printf_buf;
-	ptr.limit = rp->printf_size;
 
-	mon_text_read_head_u(rp, &ptr, ep);
-	if (ep->type == 'E') {
-		mon_text_read_statset(rp, &ptr, ep);
-	} else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
-		mon_text_read_isostat(rp, &ptr, ep);
-		mon_text_read_isodesc(rp, &ptr, ep);
-	} else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
-		mon_text_read_intstat(rp, &ptr, ep);
-	} else {
-		mon_text_read_statset(rp, &ptr, ep);
+	if (rp->printf_togo == 0) {
+
+		ep = mon_text_read_wait(rp, file);
+		if (IS_ERR(ep)) {
+			mutex_unlock(&rp->printf_lock);
+			return PTR_ERR(ep);
+		}
+		ptr.cnt = 0;
+		ptr.pbuf = rp->printf_buf;
+		ptr.limit = rp->printf_size;
+
+		mon_text_read_head_u(rp, &ptr, ep);
+		if (ep->type == 'E') {
+			mon_text_read_statset(rp, &ptr, ep);
+		} else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
+			mon_text_read_isostat(rp, &ptr, ep);
+			mon_text_read_isodesc(rp, &ptr, ep);
+		} else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
+			mon_text_read_intstat(rp, &ptr, ep);
+		} else {
+			mon_text_read_statset(rp, &ptr, ep);
+		}
+		ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+		    " %d", ep->length);
+		mon_text_read_data(rp, &ptr, ep);
+
+		rp->printf_togo = ptr.cnt;
+		rp->printf_offset = 0;
+
+		kmem_cache_free(rp->e_slab, ep);
 	}
-	ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
-	    " %d", ep->length);
-	mon_text_read_data(rp, &ptr, ep);
 
-	if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
-		ptr.cnt = -EFAULT;
+	ret = mon_text_copy_to_user(rp, buf, nbytes);
 	mutex_unlock(&rp->printf_lock);
-	kmem_cache_free(rp->e_slab, ep);
-	return ptr.cnt;
+	return ret;
 }
 
 static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
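The usbmon rewrite above makes reads restartable: the formatted record stays in printf_buf, and printf_offset/printf_togo track how much of it has already been handed out, so a read() shorter than one record returns the remainder on the next call instead of dropping it (the deleted comment explicitly said dd bs=10 did not work). A standalone model of that bookkeeping (the buffer size and record text are made up):

#include <stdio.h>
#include <string.h>

static char buf[64];
static size_t off, togo;

static size_t read_step(char *dst, size_t nbytes)
{
    size_t n;

    if (togo == 0) {                  /* residue drained: format a new record */
        togo = (size_t)snprintf(buf, sizeof(buf), "event 42 len 10\n");
        off = 0;
    }
    n = nbytes < togo ? nbytes : togo; /* hand out at most what is left */
    memcpy(dst, buf + off, n);
    off += n;
    togo -= n;
    return n;
}

int main(void)
{
    char out[8];

    do {                               /* 7-byte reads, like a tiny bs= */
        size_t n = read_step(out, sizeof(out) - 1);
        out[n] = '\0';
        printf("got %zu: \"%s\"\n", n, out);
    } while (togo > 0);                /* record delivered in three pieces */
    return 0;
}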
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index eef4ad578b31..4d723077be2b 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1756,6 +1756,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
 	int vbus;
 	u8 devctl;
 
+	pm_runtime_get_sync(dev);
 	spin_lock_irqsave(&musb->lock, flags);
 	val = musb->a_wait_bcon;
 	vbus = musb_platform_get_vbus_status(musb);
@@ -1769,6 +1770,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
 		vbus = 0;
 	}
 	spin_unlock_irqrestore(&musb->lock, flags);
+	pm_runtime_put_sync(dev);
 
 	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
 			vbus ? "on" : "off", val);
@@ -2471,11 +2473,11 @@ static int musb_remove(struct platform_device *pdev)
 	musb_disable_interrupts(musb);
 	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 	spin_unlock_irqrestore(&musb->lock, flags);
+	musb_platform_exit(musb);
 
 	pm_runtime_dont_use_autosuspend(musb->controller);
 	pm_runtime_put_sync(musb->controller);
 	pm_runtime_disable(musb->controller);
-	musb_platform_exit(musb);
 	musb_phy_callback = NULL;
 	if (musb->dma_controller)
 		musb_dma_controller_destroy(musb->dma_controller);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 3b1b9695177a..6034c39b67d1 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1076,7 +1076,7 @@ static int uas_post_reset(struct usb_interface *intf)
 		return 0;
 
 	err = uas_configure_endpoints(devinfo);
-	if (err && err != ENODEV)
+	if (err && err != -ENODEV)
 		shost_printk(KERN_ERR, shost,
 			     "%s: alloc streams error %d after reset",
 			     __func__, err);
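The uas fix above is a classic errno sign bug: kernel helpers report failure as negative errno values, so err != ENODEV compares against +19 and never matches, and the error was logged even when the device had simply gone away. A small standalone demonstration:

#include <errno.h>
#include <stdio.h>

/* configure_endpoints() reports kernel-style: 0 on success or -errno. */
static int configure_endpoints(void)
{
    return -ENODEV; /* device disappeared across the reset */
}

int main(void)
{
    int err = configure_endpoints();

    if (err && err != ENODEV)   /* buggy: -19 != 19, so this always logs */
        puts("buggy check:  would log an error");
    if (err && err != -ENODEV)  /* fixed: stays quiet for device-gone */
        puts("fixed check:  would log an error");
    return 0;
}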
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 264af199aec8..747d3a9596d9 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2118,6 +2118,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BROKEN_FUA ),
 
+/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */
+UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117,
+		"JMicron",
+		"USB to ATA/ATAPI Bridge",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BROKEN_FUA ),
+
 /* Reported-by George Cherian <george.cherian@cavium.com> */
 UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
 		"JMicron",
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
index 9ce4756adad6..dcd8ef085b30 100644
--- a/drivers/usb/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/fusb302/fusb302.c
@@ -1857,7 +1857,8 @@ static int fusb302_probe(struct i2c_client *client,
 	chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);
 	if (IS_ERR(chip->tcpm_port)) {
 		ret = PTR_ERR(chip->tcpm_port);
-		dev_err(dev, "cannot register tcpm port, ret=%d", ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "cannot register tcpm port, ret=%d", ret);
 		goto destroy_workqueue;
 	}
 
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index f4d563ee7690..8b637a4b474b 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -252,9 +252,6 @@ struct tcpm_port {
 	unsigned int nr_src_pdo;
 	u32 snk_pdo[PDO_MAX_OBJECTS];
 	unsigned int nr_snk_pdo;
-	unsigned int nr_fixed; /* number of fixed sink PDOs */
-	unsigned int nr_var; /* number of variable sink PDOs */
-	unsigned int nr_batt; /* number of battery sink PDOs */
 	u32 snk_vdo[VDO_MAX_OBJECTS];
 	unsigned int nr_snk_vdo;
 
@@ -1770,90 +1767,39 @@ static int tcpm_pd_check_request(struct tcpm_port *port)
 	return 0;
 }
 
-#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
-#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
-
-static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
-			      int *src_pdo)
+static int tcpm_pd_select_pdo(struct tcpm_port *port)
 {
-	unsigned int i, j, max_mw = 0, max_mv = 0, mw = 0, mv = 0, ma = 0;
+	unsigned int i, max_mw = 0, max_mv = 0;
 	int ret = -EINVAL;
 
 	/*
-	 * Select the source PDO providing the most power which has a
-	 * matchig sink cap.
+	 * Select the source PDO providing the most power while staying within
+	 * the board's voltage limits. Prefer PDO providing exp
 	 */
 	for (i = 0; i < port->nr_source_caps; i++) {
 		u32 pdo = port->source_caps[i];
 		enum pd_pdo_type type = pdo_type(pdo);
+		unsigned int mv, ma, mw;
 
-		if (type == PDO_TYPE_FIXED) {
-			for (j = 0; j < port->nr_fixed; j++) {
-				if (pdo_fixed_voltage(pdo) ==
-				    pdo_fixed_voltage(port->snk_pdo[j])) {
-					ma = min_current(pdo, port->snk_pdo[j]);
-					mv = pdo_fixed_voltage(pdo);
-					mw = ma * mv / 1000;
-					if (mw > max_mw ||
-					    (mw == max_mw && mv > max_mv)) {
-						ret = 0;
-						*src_pdo = i;
-						*sink_pdo = j;
-						max_mw = mw;
-						max_mv = mv;
-					}
-					/* There could only be one fixed pdo
-					 * at a specific voltage level.
-					 * So breaking here.
-					 */
-					break;
-				}
-			}
-		} else if (type == PDO_TYPE_BATT) {
-			for (j = port->nr_fixed;
-			     j < port->nr_fixed +
-				 port->nr_batt;
-			     j++) {
-				if (pdo_min_voltage(pdo) >=
-				    pdo_min_voltage(port->snk_pdo[j]) &&
-				    pdo_max_voltage(pdo) <=
-				    pdo_max_voltage(port->snk_pdo[j])) {
-					mw = min_power(pdo, port->snk_pdo[j]);
-					mv = pdo_min_voltage(pdo);
-					if (mw > max_mw ||
-					    (mw == max_mw && mv > max_mv)) {
-						ret = 0;
-						*src_pdo = i;
-						*sink_pdo = j;
-						max_mw = mw;
-						max_mv = mv;
-					}
-				}
-			}
-		} else if (type == PDO_TYPE_VAR) {
-			for (j = port->nr_fixed +
-			     port->nr_batt;
-			     j < port->nr_fixed +
-			     port->nr_batt +
-			     port->nr_var;
-			     j++) {
-				if (pdo_min_voltage(pdo) >=
-				    pdo_min_voltage(port->snk_pdo[j]) &&
-				    pdo_max_voltage(pdo) <=
-				    pdo_max_voltage(port->snk_pdo[j])) {
-					ma = min_current(pdo, port->snk_pdo[j]);
-					mv = pdo_min_voltage(pdo);
-					mw = ma * mv / 1000;
-					if (mw > max_mw ||
-					    (mw == max_mw && mv > max_mv)) {
-						ret = 0;
-						*src_pdo = i;
-						*sink_pdo = j;
-						max_mw = mw;
-						max_mv = mv;
-					}
-				}
-			}
+		if (type == PDO_TYPE_FIXED)
+			mv = pdo_fixed_voltage(pdo);
+		else
+			mv = pdo_min_voltage(pdo);
+
+		if (type == PDO_TYPE_BATT) {
+			mw = pdo_max_power(pdo);
+		} else {
+			ma = min(pdo_max_current(pdo),
+				 port->max_snk_ma);
+			mw = ma * mv / 1000;
+		}
+
+		/* Perfer higher voltages if available */
+		if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
+		    mv <= port->max_snk_mv) {
+			ret = i;
+			max_mw = mw;
+			max_mv = mv;
 		}
 	}
 
@@ -1865,14 +1811,13 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
 	unsigned int mv, ma, mw, flags;
 	unsigned int max_ma, max_mw;
 	enum pd_pdo_type type;
-	int src_pdo_index, snk_pdo_index;
-	u32 pdo, matching_snk_pdo;
+	int index;
+	u32 pdo;
 
-	if (tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index) < 0)
+	index = tcpm_pd_select_pdo(port);
+	if (index < 0)
 		return -EINVAL;
-
-	pdo = port->source_caps[src_pdo_index];
-	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
+	pdo = port->source_caps[index];
 	type = pdo_type(pdo);
 
 	if (type == PDO_TYPE_FIXED)
@@ -1880,28 +1825,26 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
 	else
 		mv = pdo_min_voltage(pdo);
 
-	/* Select maximum available current within the sink pdo's limit */
+	/* Select maximum available current within the board's power limit */
 	if (type == PDO_TYPE_BATT) {
-		mw = min_power(pdo, matching_snk_pdo);
-		ma = 1000 * mw / mv;
+		mw = pdo_max_power(pdo);
+		ma = 1000 * min(mw, port->max_snk_mw) / mv;
 	} else {
-		ma = min_current(pdo, matching_snk_pdo);
-		mw = ma * mv / 1000;
+		ma = min(pdo_max_current(pdo),
+			 1000 * port->max_snk_mw / mv);
 	}
+	ma = min(ma, port->max_snk_ma);
 
 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
 
 	/* Set mismatch bit if offered power is less than operating power */
+	mw = ma * mv / 1000;
 	max_ma = ma;
 	max_mw = mw;
 	if (mw < port->operating_snk_mw) {
 		flags |= RDO_CAP_MISMATCH;
-		if (type == PDO_TYPE_BATT &&
-		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
-			max_mw = pdo_max_power(matching_snk_pdo);
-		else if (pdo_max_current(matching_snk_pdo) >
-			 pdo_max_current(pdo))
-			max_ma = pdo_max_current(matching_snk_pdo);
+		max_mw = port->operating_snk_mw;
+		max_ma = max_mw * 1000 / mv;
 	}
 
 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
@@ -1910,16 +1853,16 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
 		 port->polarity);
 
 	if (type == PDO_TYPE_BATT) {
-		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
+		*rdo = RDO_BATT(index + 1, mw, max_mw, flags);
 
 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
-			 src_pdo_index, mv, mw,
+			 index, mv, mw,
 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
 	} else {
-		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
+		*rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
 
 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
-			 src_pdo_index, mv, ma,
+			 index, mv, ma,
 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
 	}
 
@@ -3650,19 +3593,6 @@ int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
 }
 EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
 
-static int nr_type_pdos(const u32 *pdo, unsigned int nr_pdo,
-			enum pd_pdo_type type)
-{
-	int count = 0;
-	int i;
-
-	for (i = 0; i < nr_pdo; i++) {
-		if (pdo_type(pdo[i]) == type)
-			count++;
-	}
-	return count;
-}
-
 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
 {
 	struct tcpm_port *port;
@@ -3708,15 +3638,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
 					  tcpc->config->nr_src_pdo);
 	port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
 					  tcpc->config->nr_snk_pdo);
-	port->nr_fixed = nr_type_pdos(port->snk_pdo,
-				      port->nr_snk_pdo,
-				      PDO_TYPE_FIXED);
-	port->nr_var = nr_type_pdos(port->snk_pdo,
-				    port->nr_snk_pdo,
-				    PDO_TYPE_VAR);
-	port->nr_batt = nr_type_pdos(port->snk_pdo,
-				     port->nr_snk_pdo,
-				     PDO_TYPE_BATT);
 	port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
 					  tcpc->config->nr_snk_vdo);
 
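With the sink-PDO matching reverted, tcpm_pd_select_pdo() goes back to board-level limits: for each source PDO it computes the usable power, mw = min(I_pdo, max_snk_ma) * mV / 1000 for fixed and variable PDOs, and picks the highest-power PDO whose voltage fits under max_snk_mv, preferring higher voltage on ties. A worked example with made-up numbers (a 5 V/3 A and a 9 V/2 A fixed PDO against 9 V/2.5 A board limits):

#include <stdio.h>

int main(void)
{
    unsigned int mv[] = { 5000, 9000 }, ma[] = { 3000, 2000 };
    unsigned int max_snk_mv = 9000, max_snk_ma = 2500;
    unsigned int best = 0, best_mw = 0;

    for (unsigned int i = 0; i < 2; i++) {
        unsigned int cur = ma[i] < max_snk_ma ? ma[i] : max_snk_ma;
        unsigned int mw = cur * mv[i] / 1000;

        if (mv[i] <= max_snk_mv && mw > best_mw) {
            best = i;
            best_mw = mw;
        }
    }
    /* 5 V: min(3000, 2500) * 5 = 12500 mW; 9 V: 2000 * 9 = 18000 mW */
    printf("selected PDO %u at %u mW\n", best, best_mw); /* PDO 1, 18000 mW */
    return 0;
}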
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index d86f72bbbb91..6dcd3ff655c3 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -105,10 +105,14 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
 	if (rv != 0)
 		return -EINVAL;
 
+	if (!udc) {
+		dev_err(dev, "no device");
+		return -ENODEV;
+	}
 	spin_lock_irqsave(&udc->lock, flags);
 	/* Don't export what we don't have */
-	if (!udc || !udc->driver || !udc->pullup) {
-		dev_err(dev, "no device or gadget not bound");
+	if (!udc->driver || !udc->pullup) {
+		dev_err(dev, "gadget not bound");
 		ret = -ENODEV;
 		goto unlock;
 	}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index e30e29ae4819..45657e2b1ff7 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 {
 	struct page *page[1];
 	struct vm_area_struct *vma;
+	struct vm_area_struct *vmas[1];
 	int ret;
 
 	if (mm == current->mm) {
-		ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
-					  page);
+		ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
+					      page, vmas);
 	} else {
 		unsigned int flags = 0;
 
@@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 
 		down_read(&mm->mmap_sem);
 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
-					    NULL, NULL);
+					    vmas, NULL);
+		/*
+		 * The lifetime of a vaddr_get_pfn() page pin is
+		 * userspace-controlled. In the fs-dax case this could
+		 * lead to indefinite stalls in filesystem operations.
+		 * Disallow attempts to pin fs-dax pages via this
+		 * interface.
+		 */
+		if (ret > 0 && vma_is_fsdax(vmas[0])) {
+			ret = -EOPNOTSUPP;
+			put_page(page[0]);
+		}
 		up_read(&mm->mmap_sem);
 	}
 
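The vfio hunks refuse long-term pins on fs-dax mappings: the pin's lifetime is controlled by userspace, and on fs-dax the pinned page frames are filesystem blocks, so an indefinite pin could stall truncate and other filesystem operations forever. Note the ordering in the added code: the page is already pinned when the check runs, so it must be released with put_page() before returning -EOPNOTSUPP. A toy model of that "pin, validate, undo on refusal" pattern (struct page here is a two-field stand-in, and EOPNOTSUPP assumes Linux errno values):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct page { int refcount; bool fsdax; };

static int pin_page(struct page *p)
{
    p->refcount++;            /* stand-in for get_user_pages_*() */
    if (p->fsdax) {
        p->refcount--;        /* put_page(): drop the pin before failing */
        return -EOPNOTSUPP;
    }
    return 1;                 /* number of pages pinned */
}

int main(void)
{
    struct page dax = { 0, true };
    int ret = pin_page(&dax);

    printf("ret=%d refcount=%d\n", ret, dax.refcount); /* refused, no leak */
    return 0;
}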
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index af6fc97f4ba4..a436d44f1b7f 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
 		unsigned char __user *ured;
 		unsigned char __user *ugreen;
 		unsigned char __user *ublue;
-		int index, count, i;
+		unsigned int index, count, i;
 
 		if (get_user(index, &c->index) ||
 		    __get_user(count, &c->count) ||
@@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
 		unsigned char __user *ugreen;
 		unsigned char __user *ublue;
 		struct fb_cmap *cmap = &info->cmap;
-		int index, count, i;
+		unsigned int index, count, i;
 		u8 red, green, blue;
 
 		if (get_user(index, &c->index) ||
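The sbuslib change matters because index and count are copied straight from userspace with get_user(): as signed ints, a negative index can slip past an additive upper-bound check and then index the palette arrays out of bounds, while unsigned arithmetic makes the same value fail the bound. A compilable illustration with a made-up bound:

#include <stdio.h>

#define CMAP_LEN 256

static int check_signed(int index, int count)
{
    return index + count <= CMAP_LEN;  /* -1000 + 8 = -992: passes! */
}

static int check_unsigned(unsigned int index, unsigned int count)
{
    return index + count <= CMAP_LEN;  /* -1000 wraps to a huge value: fails */
}

int main(void)
{
    printf("signed:   %d\n", check_signed(-1000, 8));   /* 1 (accepted) */
    printf("unsigned: %d\n", check_unsigned(-1000, 8)); /* 0 (rejected) */
    return 0;
}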
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index eb30f3e09a47..71458f493cf8 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -428,8 +428,6 @@ unmap_release:
 		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
 	}
 
-	vq->vq.num_free += total_sg;
-
 	if (indirect)
 		kfree(desc);
 
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index aff773bcebdb..37460cd6cabb 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -226,6 +226,7 @@ config ZIIRAVE_WATCHDOG
 config RAVE_SP_WATCHDOG
 	tristate "RAVE SP Watchdog timer"
 	depends on RAVE_SP_CORE
+	depends on NVMEM || !NVMEM
 	select WATCHDOG_CORE
 	help
 	  Support for the watchdog on RAVE SP device.
@@ -903,6 +904,7 @@ config F71808E_WDT
 config SP5100_TCO
 	tristate "AMD/ATI SP5100 TCO Timer/Watchdog"
 	depends on X86 && PCI
+	select WATCHDOG_CORE
 	---help---
 	  Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO
 	  (Total Cost of Ownership) timer is a watchdog timer that will reboot
@@ -1008,6 +1010,7 @@ config WAFER_WDT
 config I6300ESB_WDT
 	tristate "Intel 6300ESB Timer/Watchdog"
 	depends on PCI
+	select WATCHDOG_CORE
 	---help---
 	  Hardware driver for the watchdog timer built into the Intel
 	  6300ESB controller hub.
@@ -1837,6 +1840,7 @@ config WATCHDOG_SUN4V
 config XEN_WDT
 	tristate "Xen Watchdog support"
 	depends on XEN
+	select WATCHDOG_CORE
 	help
 	  Say Y here to support the hypervisor watchdog capability provided
 	  by Xen 4.0 and newer. The watchdog timeout period is normally one
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index e0678c14480f..3a33c5344bd5 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf,
 			char c;
 			if (get_user(c, buf + i))
 				return -EFAULT;
-			expect_close = (c == 'V');
+			if (c == 'V')
+				expect_close = true;
 		}
 
 		/* Properly order writes across fork()ed processes */
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index f1f00dfc0e68..b0a158073abd 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -28,16 +28,7 @@
 #include <linux/types.h>
 #include <linux/uaccess.h>
 #include <linux/watchdog.h>
-#ifdef CONFIG_HPWDT_NMI_DECODING
-#include <linux/dmi.h>
-#include <linux/spinlock.h>
-#include <linux/nmi.h>
-#include <linux/kdebug.h>
-#include <linux/notifier.h>
-#include <asm/set_memory.h>
-#endif /* CONFIG_HPWDT_NMI_DECODING */
 #include <asm/nmi.h>
-#include <asm/frame.h>
 
 #define HPWDT_VERSION			"1.4.0"
 #define SECS_TO_TICKS(secs)		((secs) * 1000 / 128)
@@ -48,6 +39,9 @@
 static unsigned int soft_margin = DEFAULT_MARGIN;	/* in seconds */
 static unsigned int reload;			/* the computed soft_margin */
 static bool nowayout = WATCHDOG_NOWAYOUT;
+#ifdef CONFIG_HPWDT_NMI_DECODING
+static unsigned int allow_kdump = 1;
+#endif
 static char expect_release;
 static unsigned long hpwdt_is_open;
 
@@ -63,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = {
 };
 MODULE_DEVICE_TABLE(pci, hpwdt_devices);
 
-#ifdef CONFIG_HPWDT_NMI_DECODING
-#define PCI_BIOS32_SD_VALUE		0x5F32335F	/* "_32_" */
-#define CRU_BIOS_SIGNATURE_VALUE	0x55524324
-#define PCI_BIOS32_PARAGRAPH_LEN	16
-#define PCI_ROM_BASE1			0x000F0000
-#define ROM_SIZE			0x10000
-
-struct bios32_service_dir {
-	u32 signature;
-	u32 entry_point;
-	u8 revision;
-	u8 length;
-	u8 checksum;
-	u8 reserved[5];
-};
-
-/* type 212 */
-struct smbios_cru64_info {
-	u8 type;
-	u8 byte_length;
-	u16 handle;
-	u32 signature;
-	u64 physical_address;
-	u32 double_length;
-	u32 double_offset;
-};
-#define SMBIOS_CRU64_INFORMATION	212
-
-/* type 219 */
-struct smbios_proliant_info {
-	u8 type;
-	u8 byte_length;
-	u16 handle;
-	u32 power_features;
-	u32 omega_features;
-	u32 reserved;
-	u32 misc_features;
-};
-#define SMBIOS_ICRU_INFORMATION	219
-
-
-struct cmn_registers {
-	union {
-		struct {
-			u8 ral;
-			u8 rah;
-			u16 rea2;
-		};
-		u32 reax;
-	} u1;
-	union {
-		struct {
-			u8 rbl;
-			u8 rbh;
-			u8 reb2l;
-			u8 reb2h;
-		};
-		u32 rebx;
-	} u2;
-	union {
-		struct {
-			u8 rcl;
-			u8 rch;
-			u16 rec2;
-		};
-		u32 recx;
-	} u3;
-	union {
-		struct {
-			u8 rdl;
-			u8 rdh;
-			u16 red2;
-		};
-		u32 redx;
-	} u4;
-
-	u32 resi;
-	u32 redi;
-	u16 rds;
-	u16 res;
-	u32 reflags;
-} __attribute__((packed));
-
-static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump = 1;
-static unsigned int is_icru;
-static unsigned int is_uefi;
-static DEFINE_SPINLOCK(rom_lock);
-static void *cru_rom_addr;
-static struct cmn_registers cmn_regs;
-
-extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
-				      unsigned long *pRomEntry);
-
-#ifdef CONFIG_X86_32
-/* --32 Bit Bios------------------------------------------------------------ */
-
-#define HPWDT_ARCH	32
-
-asm(".text \n\t"
-    ".align 4 \n\t"
-    ".globl asminline_call \n"
-    "asminline_call: \n\t"
-    "pushl %ebp \n\t"
-    "movl %esp, %ebp \n\t"
-    "pusha \n\t"
-    "pushf \n\t"
-    "push %es \n\t"
-    "push %ds \n\t"
-    "pop %es \n\t"
-    "movl 8(%ebp),%eax \n\t"
-    "movl 4(%eax),%ebx \n\t"
-    "movl 8(%eax),%ecx \n\t"
-    "movl 12(%eax),%edx \n\t"
-    "movl 16(%eax),%esi \n\t"
-    "movl 20(%eax),%edi \n\t"
-    "movl (%eax),%eax \n\t"
-    "push %cs \n\t"
-    "call *12(%ebp) \n\t"
-    "pushf \n\t"
-    "pushl %eax \n\t"
-    "movl 8(%ebp),%eax \n\t"
-    "movl %ebx,4(%eax) \n\t"
-    "movl %ecx,8(%eax) \n\t"
-    "movl %edx,12(%eax) \n\t"
-    "movl %esi,16(%eax) \n\t"
-    "movl %edi,20(%eax) \n\t"
-    "movw %ds,24(%eax) \n\t"
-    "movw %es,26(%eax) \n\t"
-    "popl %ebx \n\t"
-    "movl %ebx,(%eax) \n\t"
-    "popl %ebx \n\t"
-    "movl %ebx,28(%eax) \n\t"
-    "pop %es \n\t"
-    "popf \n\t"
-    "popa \n\t"
-    "leave \n\t"
-    "ret \n\t"
-    ".previous");
-
-
-/*
- * cru_detect
- *
- * Routine Description:
- *    This function uses the 32-bit BIOS Service Directory record to
- *    search for a $CRU record.
- *
- * Return Value:
- *     0        :  SUCCESS
- *     <0       :  FAILURE
- */
-static int cru_detect(unsigned long map_entry,
-	unsigned long map_offset)
-{
-	void *bios32_map;
-	unsigned long *bios32_entrypoint;
-	unsigned long cru_physical_address;
-	unsigned long cru_length;
-	unsigned long physical_bios_base = 0;
-	unsigned long physical_bios_offset = 0;
-	int retval = -ENODEV;
-
-	bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));
-
-	if (bios32_map == NULL)
-		return -ENODEV;
-
-	bios32_entrypoint = bios32_map + map_offset;
-
-	cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
-
-	set_memory_x((unsigned long)bios32_map, 2);
-	asminline_call(&cmn_regs, bios32_entrypoint);
-
-	if (cmn_regs.u1.ral != 0) {
-		pr_warn("Call succeeded but with an error: 0x%x\n",
-			cmn_regs.u1.ral);
-	} else {
-		physical_bios_base = cmn_regs.u2.rebx;
-		physical_bios_offset = cmn_regs.u4.redx;
-		cru_length = cmn_regs.u3.recx;
-		cru_physical_address =
-			physical_bios_base + physical_bios_offset;
-
-		/* If the values look OK, then map it in. */
-		if ((physical_bios_base + physical_bios_offset)) {
-			cru_rom_addr =
-				ioremap(cru_physical_address, cru_length);
-			if (cru_rom_addr) {
-				set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
-					(cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
-				retval = 0;
-			}
-		}
-
-		pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base);
-		pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset);
-		pr_debug("CRU Length: 0x%lx\n", cru_length);
-		pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr);
-	}
-	iounmap(bios32_map);
-	return retval;
-}
-
-/*
- * bios_checksum
- */
-static int bios_checksum(const char __iomem *ptr, int len)
-{
-	char sum = 0;
-	int i;
-
-	/*
-	 * calculate checksum of size bytes. This should add up
-	 * to zero if we have a valid header.
-	 */
-	for (i = 0; i < len; i++)
-		sum += ptr[i];
-
-	return ((sum == 0) && (len > 0));
-}
-
-/*
- * bios32_present
- *
- * Routine Description:
- *    This function finds the 32-bit BIOS Service Directory
- *
- * Return Value:
- *     0        :  SUCCESS
- *     <0       :  FAILURE
- */
-static int bios32_present(const char __iomem *p)
-{
-	struct bios32_service_dir *bios_32_ptr;
-	int length;
-	unsigned long map_entry, map_offset;
-
-	bios_32_ptr = (struct bios32_service_dir *) p;
-
-	/*
-	 * Search for signature by checking equal to the swizzled value
-	 * instead of calling another routine to perform a strcmp.
-	 */
-	if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
-		length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
-		if (bios_checksum(p, length)) {
-			/*
-			 * According to the spec, we're looking for the
-			 * first 4KB-aligned address below the entrypoint
-			 * listed in the header. The Service Directory code
-			 * is guaranteed to occupy no more than 2 4KB pages.
-			 */
-			map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
-			map_offset = bios_32_ptr->entry_point - map_entry;
-
-			return cru_detect(map_entry, map_offset);
-		}
-	}
-	return -ENODEV;
-}
-
-static int detect_cru_service(void)
-{
-	char __iomem *p, *q;
-	int rc = -1;
-
-	/*
-	 * Search from 0x0f0000 through 0x0fffff, inclusive.
-	 */
-	p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
-	if (p == NULL)
-		return -ENOMEM;
-
-	for (q = p; q < p + ROM_SIZE; q += 16) {
-		rc = bios32_present(q);
-		if (!rc)
-			break;
-	}
-	iounmap(p);
-	return rc;
-}
-/* ------------------------------------------------------------------------- */
-#endif /* CONFIG_X86_32 */
-#ifdef CONFIG_X86_64
-/* --64 Bit Bios------------------------------------------------------------ */
-
-#define HPWDT_ARCH	64
-
-asm(".text \n\t"
-    ".align 4 \n\t"
-    ".globl asminline_call \n\t"
-    ".type asminline_call, @function \n\t"
-    "asminline_call: \n\t"
-    FRAME_BEGIN
-    "pushq %rax \n\t"
-    "pushq %rbx \n\t"
-    "pushq %rdx \n\t"
-    "pushq %r12 \n\t"
-    "pushq %r9 \n\t"
-    "movq %rsi, %r12 \n\t"
-    "movq %rdi, %r9 \n\t"
-    "movl 4(%r9),%ebx \n\t"
-    "movl 8(%r9),%ecx \n\t"
-    "movl 12(%r9),%edx \n\t"
-    "movl 16(%r9),%esi \n\t"
-    "movl 20(%r9),%edi \n\t"
-    "movl (%r9),%eax \n\t"
-    "call *%r12 \n\t"
-    "pushfq \n\t"
-    "popq %r12 \n\t"
-    "movl %eax, (%r9) \n\t"
-    "movl %ebx, 4(%r9) \n\t"
-    "movl %ecx, 8(%r9) \n\t"
-    "movl %edx, 12(%r9) \n\t"
-    "movl %esi, 16(%r9) \n\t"
-    "movl %edi, 20(%r9) \n\t"
-    "movq %r12, %rax \n\t"
-    "movl %eax, 28(%r9) \n\t"
-    "popq %r9 \n\t"
-    "popq %r12 \n\t"
-    "popq %rdx \n\t"
-    "popq %rbx \n\t"
-    "popq %rax \n\t"
-    FRAME_END
-    "ret \n\t"
-    ".previous");
-
-/*
- * dmi_find_cru
- *
- * Routine Description:
- *    This function checks whether or not a SMBIOS/DMI record is
- *    the 64bit CRU info or not
- */
-static void dmi_find_cru(const struct dmi_header *dm, void *dummy)
-{
-	struct smbios_cru64_info *smbios_cru64_ptr;
-	unsigned long cru_physical_address;
-
-	if (dm->type == SMBIOS_CRU64_INFORMATION) {
-		smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
-		if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
-			cru_physical_address =
-				smbios_cru64_ptr->physical_address +
-				smbios_cru64_ptr->double_offset;
-			cru_rom_addr = ioremap(cru_physical_address,
-				smbios_cru64_ptr->double_length);
-			set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
-				smbios_cru64_ptr->double_length >> PAGE_SHIFT);
-		}
-	}
-}
-
-static int detect_cru_service(void)
-{
-	cru_rom_addr = NULL;
-
-	dmi_walk(dmi_find_cru, NULL);
-
-	/* if cru_rom_addr has been set then we found a CRU service */
-	return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
-}
-/* ------------------------------------------------------------------------- */
-#endif /* CONFIG_X86_64 */
-#endif /* CONFIG_HPWDT_NMI_DECODING */
 
 /*
  * Watchdog operations
@@ -486,30 +113,12 @@ static int hpwdt_my_nmi(void)
  */
 static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
 {
-	unsigned long rom_pl;
-	static int die_nmi_called;
-
-	if (!hpwdt_nmi_decoding)
-		return NMI_DONE;
-
 	if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
 		return NMI_DONE;
 
-	spin_lock_irqsave(&rom_lock, rom_pl);
-	if (!die_nmi_called && !is_icru && !is_uefi)
-		asminline_call(&cmn_regs, cru_rom_addr);
-	die_nmi_called = 1;
-	spin_unlock_irqrestore(&rom_lock, rom_pl);
-
 	if (allow_kdump)
 		hpwdt_stop();
 
-	if (!is_icru && !is_uefi) {
-		if (cmn_regs.u1.ral == 0) {
-			nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
-			return NMI_HANDLED;
-		}
-	}
 	nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
 		"for the NMI is logged in any one of the following "
 		"resources:\n"
@@ -675,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = {
  * Init & Exit
  */
 
-#ifdef CONFIG_HPWDT_NMI_DECODING
-#ifdef CONFIG_X86_LOCAL_APIC
-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
-{
-	/*
-	 * If nmi_watchdog is turned off then we can turn on
-	 * our nmi decoding capability.
-	 */
-	hpwdt_nmi_decoding = 1;
-}
-#else
-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
-{
-	dev_warn(&dev->dev, "NMI decoding is disabled. "
-		"Your kernel does not support a NMI Watchdog.\n");
-}
-#endif /* CONFIG_X86_LOCAL_APIC */
-
-/*
- * dmi_find_icru
- *
- * Routine Description:
- *	This function checks whether or not we are on an iCRU-based server.
- *	This check is independent of architecture and needs to be made for
- *	any ProLiant system.
- */
-static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
-{
-	struct smbios_proliant_info *smbios_proliant_ptr;
-
-	if (dm->type == SMBIOS_ICRU_INFORMATION) {
-		smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
-		if (smbios_proliant_ptr->misc_features & 0x01)
-			is_icru = 1;
-		if (smbios_proliant_ptr->misc_features & 0x1400)
-			is_uefi = 1;
-	}
-}
 
 static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
 {
+#ifdef CONFIG_HPWDT_NMI_DECODING
 	int retval;
-
-	/*
-	 * On typical CRU-based systems we need to map that service in
-	 * the BIOS. For 32 bit Operating Systems we need to go through
-	 * the 32 Bit BIOS Service Directory. For 64 bit Operating
-	 * Systems we get that service through SMBIOS.
-	 *
-	 * On systems that support the new iCRU service all we need to
-	 * do is call dmi_walk to get the supported flag value and skip
-	 * the old cru detect code.
-	 */
-	dmi_walk(dmi_find_icru, NULL);
-	if (!is_icru && !is_uefi) {
-
-		/*
-		 * We need to map the ROM to get the CRU service.
-		 * For 32 bit Operating Systems we need to go through the 32 Bit
-		 * BIOS Service Directory
-		 * For 64 bit Operating Systems we get that service through SMBIOS.
-		 */
-		retval = detect_cru_service();
-		if (retval < 0) {
-			dev_warn(&dev->dev,
-				"Unable to detect the %d Bit CRU Service.\n",
-				HPWDT_ARCH);
-			return retval;
-		}
-
-		/*
-		 * We know this is the only CRU call we need to make so lets keep as
-		 * few instructions as possible once the NMI comes in.
-		 */
-		cmn_regs.u1.rah = 0x0D;
-		cmn_regs.u1.ral = 0x02;
-	}
-
 	/*
 	 * Only one function can register for NMI_UNKNOWN
 	 */
@@ -780,45 +316,26 @@ error:
 	dev_warn(&dev->dev,
 		"Unable to register a die notifier (err=%d).\n",
 		retval);
-	if (cru_rom_addr)
-		iounmap(cru_rom_addr);
 	return retval;
+#endif /* CONFIG_HPWDT_NMI_DECODING */
+	return 0;
 }
 
 static void hpwdt_exit_nmi_decoding(void)
 {
+#ifdef CONFIG_HPWDT_NMI_DECODING
 	unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
 	unregister_nmi_handler(NMI_SERR, "hpwdt");
 	unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
-	if (cru_rom_addr)
-		iounmap(cru_rom_addr);
-}
-#else /* !CONFIG_HPWDT_NMI_DECODING */
-static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
-{
-}
-
-static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
-{
-	return 0;
+#endif
 }
 
-static void hpwdt_exit_nmi_decoding(void)
-{
-}
-#endif /* CONFIG_HPWDT_NMI_DECODING */
-
 static int hpwdt_init_one(struct pci_dev *dev,
 			  const struct pci_device_id *ent)
 {
 	int retval;
 
 	/*
-	 * Check if we can do NMI decoding or not
-	 */
-	hpwdt_check_nmi_decoding(dev);
-
-	/*
 	 * First let's find out if we are on an iLO2+ server. We will
 	 * not run on a legacy ASM box.
 	 * So we only support the G5 ProLiant servers and higher.
@@ -922,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
 #ifdef CONFIG_HPWDT_NMI_DECODING
 module_param(allow_kdump, int, 0);
 MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
-#endif /* !CONFIG_HPWDT_NMI_DECODING */
+#endif /* CONFIG_HPWDT_NMI_DECODING */
 
 module_pci_driver(hpwdt_driver);
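
The hunks above remove hpwdt's BIOS32/CRU NMI-decoding machinery wholesale. One piece of plumbing both removed helpers (dmi_find_cru() and dmi_find_icru()) were built on is dmi_walk() from <linux/dmi.h>, which invokes a decode callback once per SMBIOS/DMI record; the callback then selects records by dm->type. A minimal sketch of that pattern follows — the record type 0xC1 and all names here are invented for illustration, not taken from hpwdt:

	#include <linux/dmi.h>

	static bool vendor_feature;	/* hypothetical flag */

	/* Called once for every record in the SMBIOS/DMI table. */
	static void decode_record(const struct dmi_header *dm, void *priv)
	{
		if (dm->type == 0xC1)	/* 0xC1: made-up OEM record type */
			vendor_feature = true;
	}

	static int detect_feature(void)
	{
		/* dmi_walk() returns -ENXIO when no DMI table is available. */
		return dmi_walk(decode_record, NULL);
	}

This callback-per-record shape is why the removed detect_cru_service() could simply test a global after the walk returned.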
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index 316c2eb122d2..e8bd9887c566 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -50,6 +50,7 @@
  */
 
 #include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
 	    !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
 		timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
 
-	timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) -
+	timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
 		    arch_counter_get_cntvct();
 
 	do_div(timeleft, gwdt->clk);
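
The sbsa_gwdt change swaps readq() for lo_hi_readq() so the 64-bit WCV register can be read on configurations that lack a native 64-bit MMIO accessor. As a sketch, lo_hi_readq() behaves roughly like the helper below — two 32-bit reads, low word first — which is not atomic, but is harmless here because WCV is only being sampled (the helper name is hypothetical; the real one comes from the newly included header):

	#include <linux/io.h>

	static inline u64 sketch_lo_hi_readq(const volatile void __iomem *addr)
	{
		const volatile u32 __iomem *p = addr;
		u32 low  = readl(p);		/* bits 31:0  */
		u32 high = readl(p + 1);	/* bits 63:32 */

		return low | ((u64)high << 32);
	}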
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 1ab4bd11f5f3..762378f1811c 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -755,8 +755,8 @@ out:
 	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 error_irq:
-	for (; i >= 0; i--)
-		__unbind_from_irq(irq + i);
+	while (nvec--)
+		__unbind_from_irq(irq + nvec);
 	mutex_unlock(&irq_mapping_update_lock);
 	return ret;
 }
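
The events_base fix derives the error unwind bound from nvec, the number of event-channel IRQs requested, instead of reusing a leftover loop index whose value depends on where the failure happened. The general shape, with hypothetical helper names standing in for the driver's internals:

	/* release_one() is a stand-in for __unbind_from_irq(). */
	static void release_one(int irq);

	static void unwind(int first_irq, int nvec)
	{
		/* Walks nvec-1 down to 0, covering every slot the setup
		 * loop may have touched, wherever it bailed out. */
		while (nvec--)
			release_one(first_irq + nvec);
	}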
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 156e5aea36db..b1092fbefa63 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -416,7 +416,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
 					sock);
 	if (!map) {
 		ret = -EFAULT;
-		sock_release(map->sock);
+		sock_release(sock);
 	}
 
 out:
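
The pvcalls-back hunk fixes a NULL dereference on the failure branch itself: when the mapping constructor above returns NULL, map->sock cannot be read, so the still-valid local sock must be released instead. The rule the fix follows, sketched with hypothetical names:

	map = create_mapping(sock);	/* hypothetical constructor */
	if (!map) {
		ret = -EFAULT;
		/* Only names proven valid on this branch may be used:
		 * 'map' is NULL here, 'sock' is not. */
		sock_release(sock);
	}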
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index aedbee3b2838..2f11ca72a281 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -73,20 +73,25 @@ struct sock_mapping {
 		wait_queue_head_t inflight_conn_req;
 	} active;
 	struct {
-		/* Socket status */
+		/*
+		 * Socket status, needs to be 64-bit aligned due to the
+		 * test_and_* functions which have this requirement on arm64.
+		 */
 #define PVCALLS_STATUS_UNINITALIZED 0
 #define PVCALLS_STATUS_BIND         1
 #define PVCALLS_STATUS_LISTEN       2
-		uint8_t status;
+		uint8_t status __attribute__((aligned(8)));
 	/*
 	 * Internal state-machine flags.
 	 * Only one accept operation can be inflight for a socket.
 	 * Only one poll operation can be inflight for a given socket.
+	 * flags needs to be 64-bit aligned due to the test_and_*
+	 * functions which have this requirement on arm64.
 	 */
 #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0
 #define PVCALLS_FLAG_POLL_INFLIGHT   1
 #define PVCALLS_FLAG_POLL_RET        2
-	uint8_t flags;
+	uint8_t flags __attribute__((aligned(8)));
 	uint32_t inflight_req_id;
 	struct sock_mapping *accept_map;
 	wait_queue_head_t inflight_accept_req;
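
Both pvcalls-front fields gain __attribute__((aligned(8))) because they are handed to test_and_set_bit()-style helpers, which operate on unsigned long words and, on arm64, need that word to be naturally aligned. A sketch of the pattern under that assumption (the bit number and all names are illustrative):

	#include <linux/bitops.h>
	#include <linux/types.h>

	struct conn_state {
		/* Narrow field used as a bit word: force 8-byte alignment
		 * so the containing unsigned long access is aligned. */
		uint8_t flags __attribute__((aligned(8)));
	};

	static bool try_claim(struct conn_state *s)
	{
		/* Bit 0 stands in for an "operation in flight" flag;
		 * returns true only for the caller that set it first. */
		return !test_and_set_bit(0, (unsigned long *)&s->flags);
	}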
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 74888cacd0b0..ec9eb4fba59c 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus,
 
 	/* Register with generic device framework. */
 	err = device_register(&xendev->dev);
-	if (err)
+	if (err) {
+		put_device(&xendev->dev);
+		xendev = NULL;
 		goto fail;
+	}
 
 	return 0;
 fail:
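
The xenbus_probe fix applies the driver-core reference rule: once device_register() has been called, the struct device is refcounted and must be disposed of with put_device() even when registration fails; freeing it by hand would skip the release() callback. A sketch of that contract with a hypothetical wrapper:

	#include <linux/device.h>

	static int register_child(struct device *child)
	{
		int err = device_register(child);

		if (err) {
			/* Drops the final reference; the device's release()
			 * hook, not the caller, frees the memory. */
			put_device(child);
			return err;
		}
		return 0;
	}

Clearing the local pointer (xendev = NULL) before the goto keeps the shared fail: path from freeing or touching the already-released device a second time.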