-rw-r--r--  Documentation/ABI/stable/sysfs-driver-mlxreg-io | 6
-rw-r--r--  Documentation/core-api/xarray.rst | 15
-rw-r--r--  Documentation/devicetree/bindings/Makefile | 6
-rw-r--r--  Documentation/devicetree/bindings/display/msm/gpu.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/serio/olpc,ap-sp.txt | 4
-rw-r--r--  Documentation/sysctl/fs.txt | 26
-rw-r--r--  Documentation/x86/resctrl_ui.txt | 2
-rw-r--r--  MAINTAINERS | 72
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/include/asm/Kbuild | 4
-rw-r--r--  arch/arc/include/asm/arcregs.h | 12
-rw-r--r--  arch/arc/include/asm/bitops.h | 6
-rw-r--r--  arch/arc/include/asm/perf_event.h | 3
-rw-r--r--  arch/arc/kernel/perf_event.c | 241
-rw-r--r--  arch/arc/kernel/setup.c | 27
-rw-r--r--  arch/arc/kernel/troubleshoot.c | 30
-rw-r--r--  arch/arc/lib/memset-archs.S | 40
-rw-r--r--  arch/arc/mm/fault.c | 13
-rw-r--r--  arch/arc/mm/init.c | 3
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h | 94
-rw-r--r--  arch/arm/mach-cns3xxx/pcie.c | 4
-rw-r--r--  arch/arm64/include/asm/device.h | 3
-rw-r--r--  arch/arm64/include/asm/xen/page-coherent.h | 76
-rw-r--r--  arch/arm64/kernel/hibernate.c | 4
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 2
-rw-r--r--  arch/arm64/kernel/kaslr.c | 1
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 6
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 4
-rw-r--r--  arch/arm64/mm/flush.c | 6
-rw-r--r--  arch/c6x/include/asm/Kbuild | 1
-rw-r--r--  arch/c6x/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/h8300/include/asm/Kbuild | 1
-rw-r--r--  arch/h8300/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/hexagon/include/asm/Kbuild | 1
-rw-r--r--  arch/hexagon/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/m68k/include/asm/Kbuild | 1
-rw-r--r--  arch/m68k/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/microblaze/include/asm/Kbuild | 1
-rw-r--r--  arch/microblaze/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/openrisc/include/asm/Kbuild | 1
-rw-r--r--  arch/openrisc/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/riscv/Kconfig | 2
-rw-r--r--  arch/riscv/configs/defconfig | 8
-rw-r--r--  arch/riscv/include/asm/page.h | 2
-rw-r--r--  arch/riscv/include/asm/processor.h | 2
-rw-r--r--  arch/riscv/kernel/asm-offsets.c | 1
-rw-r--r--  arch/riscv/kernel/entry.S | 18
-rw-r--r--  arch/riscv/kernel/setup.c | 2
-rw-r--r--  arch/riscv/kernel/smpboot.c | 6
-rw-r--r--  arch/riscv/mm/init.c | 3
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 7
-rw-r--r--  arch/s390/kernel/early.c | 4
-rw-r--r--  arch/s390/kernel/setup.c | 2
-rw-r--r--  arch/s390/kernel/smp.c | 11
-rw-r--r--  arch/s390/kernel/vdso.c | 5
-rw-r--r--  arch/unicore32/include/asm/Kbuild | 1
-rw-r--r--  arch/unicore32/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/x86/Kconfig | 8
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 8
-rw-r--r--  arch/x86/boot/compressed/pgtable.h | 2
-rw-r--r--  arch/x86/entry/entry_64_compat.S | 6
-rw-r--r--  arch/x86/include/asm/intel-family.h | 3
-rw-r--r--  arch/x86/include/asm/mmu_context.h | 18
-rw-r--r--  arch/x86/include/asm/page_64_types.h | 4
-rw-r--r--  arch/x86/include/asm/resctrl_sched.h | 4
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 2
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/resctrl/Makefile | 4
-rw-r--r--  arch/x86/kernel/crash.c | 1
-rw-r--r--  arch/x86/kernel/hpet.c | 4
-rw-r--r--  arch/x86/kernel/kexec-bzimage64.c | 5
-rw-r--r--  arch/x86/kernel/kvm.c | 7
-rw-r--r--  arch/x86/kernel/tsc.c | 30
-rw-r--r--  arch/x86/kvm/Makefile | 4
-rw-r--r--  arch/x86/kvm/hyperv.c | 7
-rw-r--r--  arch/x86/kvm/lapic.c | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 1
-rw-r--r--  arch/x86/kvm/svm.c | 31
-rw-r--r--  arch/x86/kvm/trace.h | 2
-rw-r--r--  arch/x86/kvm/vmx/evmcs.c | 7
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 15
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 150
-rw-r--r--  arch/x86/kvm/x86.c | 15
-rw-r--r--  arch/x86/lib/iomem.c | 33
-rw-r--r--  arch/x86/lib/kaslr.c | 4
-rw-r--r--  arch/x86/mm/fault.c | 2
-rw-r--r--  arch/x86/mm/mem_encrypt_identity.c | 4
-rw-r--r--  arch/xtensa/Kconfig | 4
-rw-r--r--  arch/xtensa/boot/dts/Makefile | 6
-rw-r--r--  arch/xtensa/configs/audio_kc705_defconfig | 2
-rw-r--r--  arch/xtensa/configs/cadence_csp_defconfig | 2
-rw-r--r--  arch/xtensa/configs/generic_kc705_defconfig | 2
-rw-r--r--  arch/xtensa/configs/nommu_kc705_defconfig | 2
-rw-r--r--  arch/xtensa/configs/smp_lx200_defconfig | 3
-rw-r--r--  arch/xtensa/kernel/head.S | 5
-rw-r--r--  arch/xtensa/kernel/smp.c | 41
-rw-r--r--  arch/xtensa/kernel/time.c | 2
-rw-r--r--  block/blk-core.c | 6
-rw-r--r--  block/blk-flush.c | 2
-rw-r--r--  block/blk-mq-debugfs.c | 3
-rw-r--r--  block/blk-wbt.c | 4
-rw-r--r--  drivers/acpi/nfit/core.c | 66
-rw-r--r--  drivers/android/binderfs.c | 282
-rw-r--r--  drivers/ata/pata_macio.c | 9
-rw-r--r--  drivers/ata/sata_inic162x.c | 22
-rw-r--r--  drivers/base/power/runtime.c | 10
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 173
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 25
-rw-r--r--  drivers/char/mwave/mwavedd.c | 7
-rw-r--r--  drivers/clk/Kconfig | 1
-rw-r--r--  drivers/clk/clk-versaclock5.c | 4
-rw-r--r--  drivers/clk/clk.c | 16
-rw-r--r--  drivers/clk/imx/clk-frac-pll.c | 5
-rw-r--r--  drivers/clk/imx/clk-imx8qxp-lpcg.c | 2
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 4
-rw-r--r--  drivers/clk/qcom/Kconfig | 1
-rw-r--r--  drivers/clk/qcom/gcc-sdm845.c | 14
-rw-r--r--  drivers/clk/socfpga/clk-pll-s10.c | 2
-rw-r--r--  drivers/clk/socfpga/clk-s10.c | 20
-rw-r--r--  drivers/clk/tegra/clk-tegra124-dfll-fcpu.c | 4
-rw-r--r--  drivers/clk/ti/divider.c | 11
-rw-r--r--  drivers/clk/zynqmp/clkc.c | 4
-rw-r--r--  drivers/cpuidle/poll_state.c | 2
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 10
-rw-r--r--  drivers/edac/altera_edac.h | 4
-rw-r--r--  drivers/firewire/sbp2.c | 5
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 5
-rw-r--r--  drivers/gpio/gpio-altera-a10sr.c | 4
-rw-r--r--  drivers/gpio/gpio-eic-sprd.c | 14
-rw-r--r--  drivers/gpio/gpio-pcf857x.c | 26
-rw-r--r--  drivers/gpio/gpio-vf610.c | 5
-rw-r--r--  drivers/gpio/gpiolib.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 26
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 4
-rw-r--r--  drivers/hid/hid-core.c | 23
-rw-r--r--  drivers/hid/hid-debug.c | 120
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 2
-rw-r--r--  drivers/hv/channel.c | 9
-rw-r--r--  drivers/hv/hv_balloon.c | 10
-rw-r--r--  drivers/hv/ring_buffer.c | 31
-rw-r--r--  drivers/hv/vmbus_drv.c | 91
-rw-r--r--  drivers/i3c/master.c | 2
-rw-r--r--  drivers/i3c/master/dw-i3c-master.c | 18
-rw-r--r--  drivers/ide/ide-atapi.c | 9
-rw-r--r--  drivers/ide/ide-io.c | 61
-rw-r--r--  drivers/ide/ide-park.c | 2
-rw-r--r--  drivers/ide/ide-probe.c | 23
-rw-r--r--  drivers/ide/ide-proc.c | 2
-rw-r--r--  drivers/infiniband/core/core_priv.h | 1
-rw-r--r--  drivers/infiniband/core/device.c | 13
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 25
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_device.c | 8
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/flow.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 16
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 1
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 7
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 3
-rw-r--r--  drivers/input/joystick/xpad.c | 3
-rw-r--r--  drivers/input/misc/uinput.c | 5
-rw-r--r--  drivers/input/serio/olpc_apsp.c | 17
-rw-r--r--  drivers/input/touchscreen/Kconfig | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 19
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c | 4
-rw-r--r--  drivers/iommu/of_iommu.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 25
-rw-r--r--  drivers/irqchip/irq-gic-v3-mbi.c | 2
-rw-r--r--  drivers/irqchip/irq-madera.c | 2
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c | 1
-rw-r--r--  drivers/irqchip/irq-xtensa-mx.c | 40
-rw-r--r--  drivers/irqchip/irq-xtensa-pic.c | 6
-rw-r--r--  drivers/md/dm-crypt.c | 25
-rw-r--r--  drivers/md/dm-rq.c | 2
-rw-r--r--  drivers/md/dm-thin-metadata.c | 4
-rw-r--r--  drivers/md/dm-thin-metadata.h | 2
-rw-r--r--  drivers/md/dm-thin.c | 10
-rw-r--r--  drivers/md/dm.c | 41
-rw-r--r--  drivers/md/raid5-cache.c | 33
-rw-r--r--  drivers/md/raid5.c | 8
-rw-r--r--  drivers/mfd/Kconfig | 2
-rw-r--r--  drivers/misc/ibmvmc.c | 7
-rw-r--r--  drivers/misc/mei/hbm.c | 12
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 2
-rw-r--r--  drivers/misc/mei/pci-me.c | 4
-rw-r--r--  drivers/misc/pvpanic.c | 4
-rw-r--r--  drivers/mmc/host/Kconfig | 4
-rw-r--r--  drivers/mmc/host/bcm2835.c | 2
-rw-r--r--  drivers/mmc/host/dw_mmc-bluefield.c | 5
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 32
-rw-r--r--  drivers/mmc/host/mtk-sd.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-iproc.c | 5
-rw-r--r--  drivers/net/caif/caif_serial.c | 5
-rw-r--r--  drivers/net/can/dev.c | 27
-rw-r--r--  drivers/net/can/flexcan.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 2
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 2
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 4
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r--  drivers/net/ethernet/apple/bmac.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 3
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 28
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/82596.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 10
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 12
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 4
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 15
-rw-r--r--  drivers/net/ethernet/sun/cassini.h | 15
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 12
-rw-r--r--  drivers/net/hyperv/netvsc.c | 4
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 145
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 36
-rw-r--r--  drivers/net/phy/asix.c | 8
-rw-r--r--  drivers/net/phy/mdio-hisi-femac.c | 16
-rw-r--r--  drivers/net/phy/rockchip.c | 9
-rw-r--r--  drivers/net/usb/asix_devices.c | 9
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 5
-rw-r--r--  drivers/net/wireless/virt_wifi.c | 4
-rw-r--r--  drivers/nvdimm/dimm.c | 6
-rw-r--r--  drivers/nvdimm/dimm_devs.c | 22
-rw-r--r--  drivers/nvdimm/nd.h | 1
-rw-r--r--  drivers/nvme/host/multipath.c | 3
-rw-r--r--  drivers/nvme/host/pci.c | 117
-rw-r--r--  drivers/nvme/host/rdma.c | 64
-rw-r--r--  drivers/nvme/host/tcp.c | 19
-rw-r--r--  drivers/nvme/target/rdma.c | 15
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 11
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c | 16
-rw-r--r--  drivers/pci/msi.c | 39
-rw-r--r--  drivers/phy/qualcomm/phy-ath79-usb.c | 4
-rw-r--r--  drivers/phy/ti/phy-gmii-sel.c | 4
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 8
-rw-r--r--  drivers/pinctrl/mediatek/Kconfig | 3
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 7
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c | 2
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.c | 44
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.h | 2
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/s390/char/sclp_config.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 1
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 2
-rw-r--r--  drivers/scsi/53c700.c | 2
-rw-r--r--  drivers/scsi/aacraid/linit.c | 9
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 4
-rw-r--r--  drivers/scsi/csiostor/csio_attr.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 41
-rw-r--r--  drivers/scsi/scsi_lib.c | 4
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 10
-rw-r--r--  drivers/staging/android/ion/ion.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_intf.c | 1
-rw-r--r--  drivers/staging/rtl8723bs/include/ieee80211.h | 6
-rw-r--r--  drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c | 1
-rw-r--r--  drivers/staging/wilc1000/host_interface.c | 5
-rw-r--r--  drivers/staging/wilc1000/wilc_wlan.c | 7
-rw-r--r--  drivers/target/target_core_user.c | 3
-rw-r--r--  drivers/thermal/intel/int340x_thermal/processor_thermal_device.c | 30
-rw-r--r--  drivers/tty/n_hdlc.c | 1
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 17
-rw-r--r--  drivers/tty/serial/earlycon-riscv-sbi.c | 13
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 2
-rw-r--r--  drivers/tty/serial/qcom_geni_serial.c | 4
-rw-r--r--  drivers/tty/serial/serial_core.c | 12
-rw-r--r--  drivers/tty/tty_io.c | 3
-rw-r--r--  drivers/tty/vt/vt.c | 50
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 3
-rw-r--r--  drivers/usb/core/ledtrig-usbport.c | 17
-rw-r--r--  drivers/usb/dwc2/gadget.c | 2
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_sourcesink.c | 2
-rw-r--r--  drivers/usb/host/ehci-mv.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 15
-rw-r--r--  drivers/usb/serial/keyspan_usa26msg.h | 1
-rw-r--r--  drivers/usb/serial/keyspan_usa28msg.h | 1
-rw-r--r--  drivers/usb/serial/keyspan_usa49msg.h | 1
-rw-r--r--  drivers/usb/serial/keyspan_usa67msg.h | 1
-rw-r--r--  drivers/usb/serial/keyspan_usa90msg.h | 1
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.h | 2
-rw-r--r--  drivers/usb/serial/usb-serial-simple.c | 3
-rw-r--r--  drivers/usb/usbip/README | 7
-rw-r--r--  drivers/vfio/pci/trace.h | 6
-rw-r--r--  drivers/vfio/pci/vfio_pci_nvlink2.c | 36
-rw-r--r--  drivers/vhost/net.c | 3
-rw-r--r--  drivers/vhost/scsi.c | 2
-rw-r--r--  drivers/vhost/vhost.c | 7
-rw-r--r--  drivers/vhost/vhost.h | 4
-rw-r--r--  drivers/vhost/vsock.c | 2
-rw-r--r--  drivers/video/console/vgacon.c | 7
-rw-r--r--  drivers/virtio/virtio_ring.c | 15
-rw-r--r--  drivers/xen/swiotlb-xen.c | 4
-rw-r--r--  fs/autofs/expire.c | 3
-rw-r--r--  fs/autofs/inode.c | 4
-rw-r--r--  fs/btrfs/ctree.c | 78
-rw-r--r--  fs/btrfs/super.c | 3
-rw-r--r--  fs/btrfs/transaction.c | 24
-rw-r--r--  fs/btrfs/volumes.c | 4
-rw-r--r--  fs/ceph/caps.c | 2
-rw-r--r--  fs/ceph/quota.c | 13
-rw-r--r--  fs/cifs/cifs_debug.c | 1
-rw-r--r--  fs/cifs/cifsfs.h | 2
-rw-r--r--  fs/cifs/cifssmb.c | 35
-rw-r--r--  fs/cifs/connect.c | 21
-rw-r--r--  fs/cifs/file.c | 11
-rw-r--r--  fs/cifs/smb2inode.c | 17
-rw-r--r--  fs/cifs/smb2misc.c | 7
-rw-r--r--  fs/cifs/smb2ops.c | 72
-rw-r--r--  fs/cifs/smb2pdu.c | 77
-rw-r--r--  fs/cifs/smb2pdu.h | 19
-rw-r--r--  fs/cifs/trace.c | 10
-rw-r--r--  fs/cifs/trace.h | 10
-rw-r--r--  fs/cifs/transport.c | 11
-rw-r--r--  fs/dcache.c | 38
-rw-r--r--  fs/direct-io.c | 5
-rw-r--r--  fs/drop_caches.c | 8
-rw-r--r--  fs/fs-writeback.c | 40
-rw-r--r--  fs/fuse/dev.c | 4
-rw-r--r--  fs/fuse/file.c | 2
-rw-r--r--  fs/fuse/inode.c | 2
-rw-r--r--  fs/gfs2/rgrp.c | 2
-rw-r--r--  fs/iomap.c | 37
-rw-r--r--  fs/nfs/super.c | 5
-rw-r--r--  fs/nfs/write.c | 9
-rw-r--r--  fs/nfsd/vfs.c | 6
-rw-r--r--  fs/notify/inotify/inotify_user.c | 6
-rw-r--r--  fs/proc/generic.c | 4
-rw-r--r--  fs/proc/internal.h | 1
-rw-r--r--  fs/proc/proc_net.c | 20
-rw-r--r--  fs/proc/stat.c | 29
-rw-r--r--  include/dt-bindings/clock/marvell,mmp2.h | 1
-rw-r--r--  include/linux/backing-dev-defs.h | 1
-rw-r--r--  include/linux/blk_types.h | 2
-rw-r--r--  include/linux/cpu.h | 2
-rw-r--r--  include/linux/dcache.h | 7
-rw-r--r--  include/linux/fs.h | 9
-rw-r--r--  include/linux/hid-debug.h | 9
-rw-r--r--  include/linux/hid.h | 3
-rw-r--r--  include/linux/hyperv.h | 5
-rw-r--r--  include/linux/ide.h | 2
-rw-r--r--  include/linux/if_arp.h | 1
-rw-r--r--  include/linux/interrupt.h | 34
-rw-r--r--  include/linux/irqdesc.h | 2
-rw-r--r--  include/linux/kthread.h | 1
-rw-r--r--  include/linux/libnvdimm.h | 1
-rw-r--r--  include/linux/memory_hotplug.h | 18
-rw-r--r--  include/linux/pci.h | 4
-rw-r--r--  include/linux/pm_runtime.h | 2
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/linux/sched/coredump.h | 1
-rw-r--r--  include/linux/sched/wake_q.h | 6
-rw-r--r--  include/linux/xarray.h | 227
-rw-r--r--  include/net/ax25.h | 12
-rw-r--r--  include/net/tls.h | 2
-rw-r--r--  include/rdma/ib_verbs.h | 24
-rw-r--r--  include/sound/compress_driver.h | 6
-rw-r--r--  include/sound/hda_codec.h | 1
-rw-r--r--  include/sound/soc.h | 6
-rw-r--r--  include/uapi/linux/android/{binder_ctl.h => binderfs.h} | 10
-rw-r--r--  include/uapi/linux/blkzoned.h | 1
-rw-r--r--  include/uapi/linux/input.h | 6
-rw-r--r--  include/uapi/linux/virtio_config.h | 6
-rw-r--r--  include/uapi/linux/virtio_ring.h | 10
-rw-r--r--  include/uapi/rdma/hns-abi.h | 5
-rw-r--r--  include/xen/arm/page-coherent.h | 97
-rw-r--r--  init/Kconfig | 13
-rw-r--r--  kernel/cpu.c | 38
-rw-r--r--  kernel/events/core.c | 14
-rw-r--r--  kernel/exit.c | 14
-rw-r--r--  kernel/futex.c | 13
-rw-r--r--  kernel/irq/affinity.c | 121
-rw-r--r--  kernel/irq/chip.c | 12
-rw-r--r--  kernel/irq/debugfs.c | 2
-rw-r--r--  kernel/irq/handle.c | 2
-rw-r--r--  kernel/irq/internals.h | 8
-rw-r--r--  kernel/irq/irqdesc.c | 9
-rw-r--r--  kernel/irq/irqdomain.c | 2
-rw-r--r--  kernel/irq/manage.c | 4
-rw-r--r--  kernel/kthread.c | 8
-rw-r--r--  kernel/locking/rwsem-xadd.c | 11
-rw-r--r--  kernel/sched/core.c | 19
-rw-r--r--  kernel/sched/fair.c | 1
-rw-r--r--  kernel/sched/psi.c | 21
-rw-r--r--  kernel/smp.c | 2
-rw-r--r--  kernel/softirq.c | 3
-rw-r--r--  kernel/time/posix-cpu-timers.c | 1
-rw-r--r--  kernel/trace/trace_uprobe.c | 9
-rw-r--r--  kernel/workqueue.c | 23
-rw-r--r--  kernel/workqueue_internal.h | 6
-rw-r--r--  lib/test_kmod.c | 2
-rw-r--r--  lib/test_xarray.c | 57
-rw-r--r--  lib/xarray.c | 92
-rw-r--r--  mm/backing-dev.c | 1
-rw-r--r--  mm/hugetlb.c | 3
-rw-r--r--  mm/kasan/Makefile | 1
-rw-r--r--  mm/memory-failure.c | 3
-rw-r--r--  mm/memory_hotplug.c | 62
-rw-r--r--  mm/migrate.c | 12
-rw-r--r--  mm/mincore.c | 94
-rw-r--r--  mm/oom_kill.c | 12
-rw-r--r--  mm/page_alloc.c | 12
-rw-r--r--  net/ax25/ax25_ip.c | 4
-rw-r--r--  net/ax25/ax25_route.c | 19
-rw-r--r--  net/bridge/netfilter/ebtables.c | 9
-rw-r--r--  net/can/bcm.c | 27
-rw-r--r--  net/ceph/messenger.c | 5
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/decnet/dn_dev.c | 2
-rw-r--r--  net/ipv4/gre_demux.c | 17
-rw-r--r--  net/ipv4/ip_gre.c | 9
-rw-r--r--  net/ipv4/ip_tunnel.c | 8
-rw-r--r--  net/ipv4/ip_vti.c | 50
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 6
-rw-r--r--  net/ipv6/ip6_gre.c | 4
-rw-r--r--  net/ipv6/ip6mr.c | 7
-rw-r--r--  net/mac80211/cfg.c | 4
-rw-r--r--  net/mac80211/rx.c | 6
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 12
-rw-r--r--  net/netfilter/nfnetlink_osf.c | 4
-rw-r--r--  net/netfilter/nft_compat.c | 189
-rw-r--r--  net/netrom/nr_timer.c | 20
-rw-r--r--  net/rose/rose_route.c | 5
-rw-r--r--  net/sctp/ipv6.c | 3
-rw-r--r--  net/sctp/protocol.c | 3
-rw-r--r--  net/sctp/sm_make_chunk.c | 11
-rw-r--r--  net/sctp/stream.c | 58
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c | 105
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 9
-rw-r--r--  net/tls/tls_sw.c | 6
-rw-r--r--  net/wireless/nl80211.c | 2
-rw-r--r--  net/wireless/reg.c | 13
-rw-r--r--  net/xfrm/xfrm_policy.c | 63
-rw-r--r--  net/xfrm/xfrm_user.c | 13
-rw-r--r--  security/apparmor/domain.c | 5
-rw-r--r--  security/apparmor/lsm.c | 2
-rw-r--r--  sound/core/compress_offload.c | 3
-rw-r--r--  sound/core/pcm_lib.c | 9
-rw-r--r--  sound/pci/hda/hda_bind.c | 3
-rw-r--r--  sound/pci/hda/hda_intel.c | 2
-rw-r--r--  sound/pci/hda/patch_ca0132.c | 4
-rw-r--r--  sound/pci/hda/patch_conexant.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 140
-rw-r--r--  sound/soc/amd/raven/acp3x-pcm-dma.c | 6
-rw-r--r--  sound/soc/codecs/hdac_hdmi.c | 116
-rw-r--r--  sound/soc/codecs/pcm512x.c | 11
-rw-r--r--  sound/soc/codecs/rt274.c | 5
-rw-r--r--  sound/soc/codecs/rt5514-spi.c | 2
-rw-r--r--  sound/soc/codecs/rt5682.c | 1
-rw-r--r--  sound/soc/codecs/rt5682.h | 24
-rw-r--r--  sound/soc/codecs/tlv320aic32x4.c | 4
-rw-r--r--  sound/soc/fsl/imx-audmux.c | 24
-rw-r--r--  sound/soc/intel/Kconfig | 2
-rw-r--r--  sound/soc/intel/atom/sst-mfld-platform-pcm.c | 8
-rw-r--r--  sound/soc/intel/boards/broadwell.c | 2
-rw-r--r--  sound/soc/intel/boards/glk_rt5682_max98357a.c | 45
-rw-r--r--  sound/soc/intel/boards/haswell.c | 2
-rw-r--r--  sound/soc/intel/skylake/skl.c | 13
-rw-r--r--  sound/soc/qcom/qdsp6/q6asm-dai.c | 17
-rw-r--r--  sound/soc/qcom/sdm845.c | 31
-rw-r--r--  sound/soc/sh/dma-sh7760.c | 2
-rw-r--r--  sound/soc/soc-core.c | 34
-rw-r--r--  sound/soc/soc-dapm.c | 10
-rw-r--r--  sound/soc/ti/davinci-mcasp.c | 136
-rw-r--r--  sound/soc/xilinx/Kconfig | 2
-rw-r--r--  sound/soc/xilinx/xlnx_i2s.c | 15
-rw-r--r--  sound/usb/quirks.c | 2
-rw-r--r--  tools/perf/builtin-script.c | 9
-rw-r--r--  tools/perf/ui/browsers/annotate.c | 16
-rw-r--r--  tools/perf/util/cpumap.c | 11
-rw-r--r--  tools/perf/util/ordered-events.c | 6
-rw-r--r--  tools/perf/util/setup.py | 2
-rw-r--r--  tools/testing/nvdimm/dimm_devs.c | 4
-rwxr-xr-x  tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh | 13
-rw-r--r--  tools/testing/selftests/gpio/gpio-mockup-chardev.c | 9
-rw-r--r--  tools/testing/selftests/ir/Makefile | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 9
-rw-r--r--  tools/testing/selftests/kvm/x86_64/evmcs_test.c | 6
-rw-r--r--  tools/testing/selftests/net/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/net/xfrm_policy.sh | 153
-rw-r--r--  tools/testing/selftests/proc/.gitignore | 1
-rw-r--r--  tools/testing/selftests/proc/Makefile | 1
-rw-r--r--  tools/testing/selftests/proc/setns-dcache.c | 129
-rw-r--r--  tools/testing/selftests/rtc/rtctest.c | 109
-rw-r--r--  tools/testing/selftests/seccomp/Makefile | 2
-rw-r--r--  tools/testing/selftests/seccomp/seccomp_bpf.c | 82
-rw-r--r--  tools/testing/selftests/timers/Makefile | 2
-rw-r--r--  tools/testing/selftests/vm/gup_benchmark.c | 1
-rw-r--r--  tools/testing/selftests/x86/mpx-mini-test.c | 2
-rw-r--r--  tools/testing/selftests/x86/protection_keys.c | 41
-rw-r--r--  tools/testing/selftests/x86/unwind_vdso.c | 1
-rw-r--r--  virt/kvm/kvm_main.c | 3
540 files changed, 5344 insertions(+), 2994 deletions(-)
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index 9b642669cb16..169fe08a649b 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -24,7 +24,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
 			cpld3_version
 
 Date:		November 2018
-KernelVersion:	4.21
+KernelVersion:	5.0
 Contact:	Vadim Pasternak <vadimp@mellanox.com>
 Description:	These files show with which CPLD versions have been burned
 		on LED board.
@@ -35,7 +35,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
 			jtag_enable
 
 Date:		November 2018
-KernelVersion:	4.21
+KernelVersion:	5.0
 Contact:	Vadim Pasternak <vadimp@mellanox.com>
 Description:	These files enable and disable the access to the JTAG domain.
 		By default access to the JTAG domain is disabled.
@@ -105,7 +105,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
 			reset_voltmon_upgrade_fail
 
 Date:		November 2018
-KernelVersion:	4.21
+KernelVersion:	5.0
 Contact:	Vadim Pasternak <vadimp@mellanox.com>
 Description:	These files show the system reset cause, as following: ComEx
 		power fail, reset from ComEx, system platform reset, reset
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index 6a6d67acaf69..5d54b27c6eba 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -108,12 +108,13 @@ some, but not all of the other indices changing.
 
 Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
 will not need to allocate memory.  The :c:func:`xa_reserve` function
-will store a reserved entry at the indicated index.  Users of the normal
-API will see this entry as containing ``NULL``.  If you do not need to
-use the reserved entry, you can call :c:func:`xa_release` to remove the
-unused entry.  If another user has stored to the entry in the meantime,
-:c:func:`xa_release` will do nothing; if instead you want the entry to
-become ``NULL``, you should use :c:func:`xa_erase`.
+will store a reserved entry at the indicated index.  Users of the
+normal API will see this entry as containing ``NULL``.  If you do
+not need to use the reserved entry, you can call :c:func:`xa_release`
+to remove the unused entry.  If another user has stored to the entry
+in the meantime, :c:func:`xa_release` will do nothing; if instead you
+want the entry to become ``NULL``, you should use :c:func:`xa_erase`.
+Using :c:func:`xa_insert` on a reserved entry will fail.
 
 If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
 will return ``true``.
@@ -183,6 +184,8 @@ Takes xa_lock internally:
  * :c:func:`xa_store_bh`
  * :c:func:`xa_store_irq`
  * :c:func:`xa_insert`
+ * :c:func:`xa_insert_bh`
+ * :c:func:`xa_insert_irq`
 * :c:func:`xa_erase`
 * :c:func:`xa_erase_bh`
 * :c:func:`xa_erase_irq`
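The reserve/release flow documented in the first hunk above composes as
follows. This is a minimal sketch against only the calls named in the text;
the xarray ``things`` and the helper ``stash_later()`` are hypothetical::

    #include <linux/xarray.h>

    static DEFINE_XARRAY(things);

    /* Guarantee that the later store cannot fail on allocation. */
    static int stash_later(unsigned long index, void *payload, gfp_t gfp)
    {
            int err = xa_reserve(&things, index, gfp);

            if (err)
                    return err;

            if (!payload) {
                    /* Nothing to store after all: drop the unused slot. */
                    xa_release(&things, index);
                    return 0;
            }

            /* Readers saw NULL at 'index' until this point. */
            xa_store(&things, index, payload, gfp);
            return 0;
    }
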
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 6e5cef0ed6fb..50daa0b3b032 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -17,7 +17,11 @@ extra-y += $(DT_TMP_SCHEMA)
 quiet_cmd_mk_schema = SCHEMA  $@
       cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^)
 
-DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml')
+DT_DOCS = $(shell \
+	cd $(srctree)/$(src) && \
+	find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
+	)
+
 DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
 
 extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index ac8df3b871f9..f8759145ce1a 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -27,7 +27,6 @@ Example:
 		reg = <0x04300000 0x20000>;
 		reg-names = "kgsl_3d0_reg_memory";
 		interrupts = <GIC_SPI 80 0>;
-		interrupt-names = "kgsl_3d0_irq";
 		clock-names =
 		    "core",
 		    "iface",
diff --git a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
index 36603419d6f8..0e72183f52bc 100644
--- a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
+++ b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt
@@ -4,14 +4,10 @@ Required properties:
 - compatible : "olpc,ap-sp"
 - reg : base address and length of SoC's WTM registers
 - interrupts : SP-AP interrupt
-- clocks : phandle + clock-specifier for the clock that drives the WTM
-- clock-names: should be "sp"
 
 Example:
 	ap-sp@d4290000 {
 		compatible = "olpc,ap-sp";
 		reg = <0xd4290000 0x1000>;
 		interrupts = <40>;
-		clocks = <&soc_clocks MMP2_CLK_SP>;
-		clock-names = "sp";
 	}
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 819caf8ca05f..58649bd4fcfc 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -56,26 +56,32 @@ of any kernel data structures.
 
 dentry-state:
 
-From linux/fs/dentry.c:
+From linux/include/linux/dcache.h:
 --------------------------------------------------------------
-struct {
+struct dentry_stat_t dentry_stat {
         int nr_dentry;
         int nr_unused;
         int age_limit;          /* age in seconds */
         int want_pages;         /* pages requested by system */
-        int dummy[2];
-} dentry_stat = {0, 0, 45, 0,};
---------------------------------------------------------------
-
-Dentries are dynamically allocated and deallocated, and
-nr_dentry seems to be 0 all the time. Hence it's safe to
-assume that only nr_unused, age_limit and want_pages are
-used. Nr_unused seems to be exactly what its name says.
+        int nr_negative;        /* # of unused negative dentries */
+        int dummy;              /* Reserved for future use */
+};
+--------------------------------------------------------------
+
+Dentries are dynamically allocated and deallocated.
+
+nr_dentry shows the total number of dentries allocated (active
++ unused). nr_unused shows the number of dentries that are not
+actively used, but are saved in the LRU list for future reuse.
+
 Age_limit is the age in seconds after which dcache entries
 can be reclaimed when memory is short and want_pages is
 nonzero when shrink_dcache_pages() has been called and the
 dcache isn't pruned yet.
 
+nr_negative shows the number of unused dentries that are also
+negative dentries which do not mapped to actual files.
+
 ==============================================================
 
 dquot-max & dquot-nr:
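All six fields appear as a single line of numbers in
/proc/sys/fs/dentry-state. A minimal userspace reader, assuming only the
field order documented above (the program itself is illustrative):

    #include <stdio.h>

    int main(void)
    {
            long nr_dentry, nr_unused, age_limit, want_pages;
            long nr_negative, dummy;
            FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%ld %ld %ld %ld %ld %ld", &nr_dentry, &nr_unused,
                       &age_limit, &want_pages, &nr_negative, &dummy) == 6)
                    printf("dentries: %ld total, %ld unused, %ld negative\n",
                           nr_dentry, nr_unused, nr_negative);
            fclose(f);
            return 0;
    }
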
diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.txt
index e8e8d14d3c4e..c1f95b59e14d 100644
--- a/Documentation/x86/resctrl_ui.txt
+++ b/Documentation/x86/resctrl_ui.txt
@@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com>
 Tony Luck <tony.luck@intel.com>
 Vikas Shivappa <vikas.shivappa@intel.com>
 
-This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo
+This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo
 flag bits:
 RDT (Resource Director Technology) Allocation - "rdt_a"
 CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
diff --git a/MAINTAINERS b/MAINTAINERS
index 51029a425dbe..8c68de3cfd80 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3052,8 +3052,8 @@ F:	include/linux/bcm963xx_nvram.h
 F:	include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Rasesh Mody <rmody@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2.*
@@ -3072,9 +3072,9 @@ S:	Supported
 F:	drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M:	Ariel Elior <ariel.elior@cavium.com>
-M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:	everest-linux-l2@cavium.com
+M:	Ariel Elior <aelior@marvell.com>
+M:	Sudarsana Kalluru <skalluru@marvell.com>
+M:	GR-everest-linux-l2@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
@@ -3249,9 +3249,9 @@ S:	Supported
 F:	drivers/scsi/bfa/
 
 BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M:	Rasesh Mody <rasesh.mody@cavium.com>
-M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Rasesh Mody <rmody@marvell.com>
+M:	Sudarsana Kalluru <skalluru@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/brocade/bna/
@@ -3978,6 +3978,7 @@ F:	drivers/cpufreq/arm_big_little.c
 CPU POWER MONITORING SUBSYSTEM
 M:	Thomas Renninger <trenn@suse.com>
 M:	Shuah Khan <shuah@kernel.org>
+M:	Shuah Khan <skhan@linuxfoundation.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 F:	tools/power/cpupower/
@@ -8258,6 +8259,7 @@ F:	include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
 M:	Shuah Khan <shuah@kernel.org>
+M:	Shuah Khan <skhan@linuxfoundation.org>
 L:	linux-kselftest@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
 Q:	https://patchwork.kernel.org/project/linux-kselftest/list/
@@ -10688,9 +10690,9 @@ S:	Maintained
 F:	drivers/net/netdevsim/*
 
 NETXEN (1/10) GbE SUPPORT
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Rahul Verma <rahul.verma@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Manish Chopra <manishc@marvell.com>
+M:	Rahul Verma <rahulv@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/netxen/
@@ -12474,8 +12476,8 @@ S:	Supported
 F:	drivers/scsi/qedi/
 
 QLOGIC QL4xxx ETHERNET DRIVER
-M:	Ariel Elior <Ariel.Elior@cavium.com>
-M:	everest-linux-l2@cavium.com
+M:	Ariel Elior <aelior@marvell.com>
+M:	GR-everest-linux-l2@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qed/
@@ -12483,8 +12485,8 @@ F:	include/linux/qed/
 F:	drivers/net/ethernet/qlogic/qede/
 
 QLOGIC QL4xxx RDMA DRIVER
-M:	Michal Kalderon <Michal.Kalderon@cavium.com>
-M:	Ariel Elior <Ariel.Elior@cavium.com>
+M:	Michal Kalderon <mkalderon@marvell.com>
+M:	Ariel Elior <aelior@marvell.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/qedr/
@@ -12504,7 +12506,7 @@ F:	Documentation/scsi/LICENSE.qla2xxx
 F:	drivers/scsi/qla2xxx/
 
 QLOGIC QLA3XXX NETWORK DRIVER
-M:	Dept-GELinuxNICDev@cavium.com
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
@@ -12518,16 +12520,16 @@ F:	Documentation/scsi/LICENSE.qla4xxx
 F:	drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:	Shahed Shaikh <Shahed.Shaikh@cavium.com>
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Shahed Shaikh <shshaikh@marvell.com>
+M:	Manish Chopra <manishc@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:	Manish Chopra <manish.chopra@cavium.com>
-M:	Dept-GELinuxNICDev@cavium.com
+M:	Manish Chopra <manishc@marvell.com>
+M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qlge/
@@ -15841,6 +15843,7 @@ F:	drivers/usb/common/usb-otg-fsm.c
 USB OVER IP DRIVER
 M:	Valentina Manea <valentina.manea.m@gmail.com>
 M:	Shuah Khan <shuah@kernel.org>
+M:	Shuah Khan <skhan@linuxfoundation.org>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 F:	Documentation/usb/usbip_protocol.txt
@@ -16638,6 +16641,15 @@ S:	Maintained
 F:	drivers/platform/x86/
 F:	drivers/platform/olpc/
 
+X86 PLATFORM DRIVERS - ARCH
+R:	Darren Hart <dvhart@infradead.org>
+R:	Andy Shevchenko <andy@infradead.org>
+L:	platform-driver-x86@vger.kernel.org
+L:	x86@kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
+S:	Maintained
+F:	arch/x86/platform
+
 X86 VDSO
 M:	Andy Lutomirski <luto@kernel.org>
 L:	linux-kernel@vger.kernel.org
@@ -16670,6 +16682,24 @@ T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/tuners/tuner-xc2028.*
 
+XDP (eXpress Data Path)
+M:	Alexei Starovoitov <ast@kernel.org>
+M:	Daniel Borkmann <daniel@iogearbox.net>
+M:	David S. Miller <davem@davemloft.net>
+M:	Jakub Kicinski <jakub.kicinski@netronome.com>
+M:	Jesper Dangaard Brouer <hawk@kernel.org>
+M:	John Fastabend <john.fastabend@gmail.com>
+L:	netdev@vger.kernel.org
+L:	xdp-newbies@vger.kernel.org
+S:	Supported
+F:	net/core/xdp.c
+F:	include/net/xdp.h
+F:	kernel/bpf/devmap.c
+F:	kernel/bpf/cpumap.c
+F:	include/trace/events/xdp.h
+K:	xdp
+N:	xdp
+
 XDP SOCKETS (AF_XDP)
 M:	Björn Töpel <bjorn.topel@intel.com>
 M:	Magnus Karlsson <magnus.karlsson@intel.com>
diff --git a/Makefile b/Makefile
index f5b1d0d168e0..3142e67d03f1 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index feed50ce89fa..caa270261521 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -3,23 +3,19 @@ generic-y += bugs.h
 generic-y += compat.h
 generic-y += device.h
 generic-y += div64.h
-generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += extable.h
-generic-y += fb.h
 generic-y += ftrace.h
 generic-y += hardirq.h
 generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
-generic-y += kmap_types.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += parport.h
-generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += topology.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 49bfbd879caa..f1b86cef0905 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -216,6 +216,14 @@ struct bcr_fp_arcv2 {
 #endif
 };
 
+struct bcr_actionpoint {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:21, min:1, num:2, ver:8;
+#else
+	unsigned int ver:8, num:2, min:1, pad:21;
+#endif
+};
+
 #include <soc/arc/timers.h>
 
 struct bcr_bpu_arcompact {
@@ -283,7 +291,7 @@ struct cpuinfo_arc_cache {
 };
 
 struct cpuinfo_arc_bpu {
-	unsigned int ver, full, num_cache, num_pred;
+	unsigned int ver, full, num_cache, num_pred, ret_stk;
 };
 
 struct cpuinfo_arc_ccm {
@@ -302,7 +310,7 @@ struct cpuinfo_arc {
 	struct {
 		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
 			     fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
-			     debug:1, ap:1, smart:1, rtt:1, pad3:4,
+			     ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
 			     timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
 	} extn;
 	struct bcr_mpy extn_mpy;
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index ee9246184033..202b74c339f0 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
 {
 	if (!word)
 		return word;
@@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
 {
-	int n;
+	unsigned long n;
 
 	asm volatile(
 	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 9185541035cc..6958545390f0 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
 
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
 	[PERF_COUNT_ARC_BPOK] = "bpok",	/* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
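The remapped counter above is reached through the ordinary perf syscall
interface; a minimal sketch using the generic
PERF_COUNT_HW_BRANCH_INSTRUCTIONS slot (the one this hunk retargets to the
"ijmptak" hardware condition on ARC), with the measured workload left as a
placeholder:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr pe;
            long long count;
            int fd;

            memset(&pe, 0, sizeof(pe));
            pe.size = sizeof(pe);
            pe.type = PERF_TYPE_HARDWARE;
            pe.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
            pe.disabled = 1;

            fd = syscall(__NR_perf_event_open, &pe, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload under measurement ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("taken branches: %lld\n", count);
            close(fd);
            return 0;
    }
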
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8aec462d90fb..861a8aea51f9 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -1,15 +1,10 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * Linux performance counter support for ARC700 series 2//
3 * 3// Linux performance counter support for ARC CPUs.
4 * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com) 4// This code is inspired by the perf support of various other architectures.
5 * 5//
6 * This code is inspired by the perf support of various other architectures. 6// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
7 * 7
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13#include <linux/errno.h> 8#include <linux/errno.h>
14#include <linux/interrupt.h> 9#include <linux/interrupt.h>
15#include <linux/module.h> 10#include <linux/module.h>
@@ -19,12 +14,31 @@
19#include <asm/arcregs.h> 14#include <asm/arcregs.h>
20#include <asm/stacktrace.h> 15#include <asm/stacktrace.h>
21 16
17/* HW holds 8 symbols + one for null terminator */
18#define ARCPMU_EVENT_NAME_LEN 9
19
20enum arc_pmu_attr_groups {
21 ARCPMU_ATTR_GR_EVENTS,
22 ARCPMU_ATTR_GR_FORMATS,
23 ARCPMU_NR_ATTR_GR
24};
25
26struct arc_pmu_raw_event_entry {
27 char name[ARCPMU_EVENT_NAME_LEN];
28};
29
22struct arc_pmu { 30struct arc_pmu {
23 struct pmu pmu; 31 struct pmu pmu;
24 unsigned int irq; 32 unsigned int irq;
25 int n_counters; 33 int n_counters;
34 int n_events;
26 u64 max_period; 35 u64 max_period;
27 int ev_hw_idx[PERF_COUNT_ARC_HW_MAX]; 36 int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
37
38 struct arc_pmu_raw_event_entry *raw_entry;
39 struct attribute **attrs;
40 struct perf_pmu_events_attr *attr;
41 const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
28}; 42};
29 43
30struct arc_pmu_cpu { 44struct arc_pmu_cpu {
@@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data)
49{ 63{
50 struct arc_callchain_trace *ctrl = data; 64 struct arc_callchain_trace *ctrl = data;
51 struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff; 65 struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
66
52 perf_callchain_store(entry, addr); 67 perf_callchain_store(entry, addr);
53 68
54 if (ctrl->depth++ < 3) 69 if (ctrl->depth++ < 3)
@@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data)
57 return -1; 72 return -1;
58} 73}
59 74
60void 75void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
61perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 76 struct pt_regs *regs)
62{ 77{
63 struct arc_callchain_trace ctrl = { 78 struct arc_callchain_trace ctrl = {
64 .depth = 0, 79 .depth = 0,
@@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
68 arc_unwind_core(NULL, regs, callchain_trace, &ctrl); 83 arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
69} 84}
70 85
71void 86void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
72perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 87 struct pt_regs *regs)
73{ 88{
74 /* 89 /*
75 * User stack can't be unwound trivially with kernel dwarf unwinder 90 * User stack can't be unwound trivially with kernel dwarf unwinder
@@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu;
82static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu); 97static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
83 98
84/* read counter #idx; note that counter# != event# on ARC! */ 99/* read counter #idx; note that counter# != event# on ARC! */
85static uint64_t arc_pmu_read_counter(int idx) 100static u64 arc_pmu_read_counter(int idx)
86{ 101{
87 uint32_t tmp; 102 u32 tmp;
88 uint64_t result; 103 u64 result;
89 104
90 /* 105 /*
91 * ARC supports making 'snapshots' of the counters, so we don't 106 * ARC supports making 'snapshots' of the counters, so we don't
@@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx)
94 write_aux_reg(ARC_REG_PCT_INDEX, idx); 109 write_aux_reg(ARC_REG_PCT_INDEX, idx);
95 tmp = read_aux_reg(ARC_REG_PCT_CONTROL); 110 tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
96 write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN); 111 write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
97 result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; 112 result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
98 result |= read_aux_reg(ARC_REG_PCT_SNAPL); 113 result |= read_aux_reg(ARC_REG_PCT_SNAPL);
99 114
100 return result; 115 return result;
@@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx)
103static void arc_perf_event_update(struct perf_event *event, 118static void arc_perf_event_update(struct perf_event *event,
104 struct hw_perf_event *hwc, int idx) 119 struct hw_perf_event *hwc, int idx)
105{ 120{
106 uint64_t prev_raw_count = local64_read(&hwc->prev_count); 121 u64 prev_raw_count = local64_read(&hwc->prev_count);
107 uint64_t new_raw_count = arc_pmu_read_counter(idx); 122 u64 new_raw_count = arc_pmu_read_counter(idx);
108 int64_t delta = new_raw_count - prev_raw_count; 123 s64 delta = new_raw_count - prev_raw_count;
109 124
110 /* 125 /*
111 * We aren't afraid of hwc->prev_count changing beneath our feet 126 * We aren't afraid of hwc->prev_count changing beneath our feet
@@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event)
155 int ret; 170 int ret;
156 171
157 if (!is_sampling_event(event)) { 172 if (!is_sampling_event(event)) {
158 hwc->sample_period = arc_pmu->max_period; 173 hwc->sample_period = arc_pmu->max_period;
159 hwc->last_period = hwc->sample_period; 174 hwc->last_period = hwc->sample_period;
160 local64_set(&hwc->period_left, hwc->sample_period); 175 local64_set(&hwc->period_left, hwc->sample_period);
161 } 176 }
@@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event)
192 pr_debug("init cache event with h/w %08x \'%s\'\n", 207 pr_debug("init cache event with h/w %08x \'%s\'\n",
193 (int)hwc->config, arc_pmu_ev_hw_map[ret]); 208 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
194 return 0; 209 return 0;
210
211 case PERF_TYPE_RAW:
212 if (event->attr.config >= arc_pmu->n_events)
213 return -ENOENT;
214
215 hwc->config |= event->attr.config;
216 pr_debug("init raw event with idx %lld \'%s\'\n",
217 event->attr.config,
218 arc_pmu->raw_entry[event->attr.config].name);
219
220 return 0;
221
195 default: 222 default:
196 return -ENOENT; 223 return -ENOENT;
197 } 224 }
@@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event)
200/* starts all counters */ 227/* starts all counters */
201static void arc_pmu_enable(struct pmu *pmu) 228static void arc_pmu_enable(struct pmu *pmu)
202{ 229{
203 uint32_t tmp; 230 u32 tmp;
204 tmp = read_aux_reg(ARC_REG_PCT_CONTROL); 231 tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
205 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1); 232 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
206} 233}
@@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu)
208/* stops all counters */ 235/* stops all counters */
209static void arc_pmu_disable(struct pmu *pmu) 236static void arc_pmu_disable(struct pmu *pmu)
210{ 237{
211 uint32_t tmp; 238 u32 tmp;
212 tmp = read_aux_reg(ARC_REG_PCT_CONTROL); 239 tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
213 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0); 240 write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
214} 241}
@@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event)
228 local64_set(&hwc->period_left, left); 255 local64_set(&hwc->period_left, left);
229 hwc->last_period = period; 256 hwc->last_period = period;
230 overflow = 1; 257 overflow = 1;
231 } else if (unlikely(left <= 0)) { 258 } else if (unlikely(left <= 0)) {
232 /* left underflowed by less than period. */ 259 /* left underflowed by less than period. */
233 left += period; 260 left += period;
234 local64_set(&hwc->period_left, left); 261 local64_set(&hwc->period_left, left);
@@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event)
246 write_aux_reg(ARC_REG_PCT_INDEX, idx); 273 write_aux_reg(ARC_REG_PCT_INDEX, idx);
247 274
248 /* Write value */ 275 /* Write value */
249 write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value); 276 write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
250 write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32)); 277 write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
251 278
252 perf_event_update_userpage(event); 279 perf_event_update_userpage(event);
253 280
@@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags)
277 /* Enable interrupt for this counter */ 304 /* Enable interrupt for this counter */
278 if (is_sampling_event(event)) 305 if (is_sampling_event(event))
279 write_aux_reg(ARC_REG_PCT_INT_CTRL, 306 write_aux_reg(ARC_REG_PCT_INT_CTRL,
280 read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); 307 read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
281 308
282 /* enable ARC pmu here */ 309 /* enable ARC pmu here */
283 write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */ 310 write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */
@@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
295 * Reset interrupt flag by writing of 1. This is required 322 * Reset interrupt flag by writing of 1. This is required
296 * to make sure pending interrupt was not left. 323 * to make sure pending interrupt was not left.
297 */ 324 */
298 write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); 325 write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
299 write_aux_reg(ARC_REG_PCT_INT_CTRL, 326 write_aux_reg(ARC_REG_PCT_INT_CTRL,
300 read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx)); 327 read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
301 } 328 }
302 329
303 if (!(event->hw.state & PERF_HES_STOPPED)) { 330 if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags)
349 376
350 if (is_sampling_event(event)) { 377 if (is_sampling_event(event)) {
351 /* Mimic full counter overflow as other arches do */ 378 /* Mimic full counter overflow as other arches do */
352 write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period); 379 write_aux_reg(ARC_REG_PCT_INT_CNTL,
380 lower_32_bits(arc_pmu->max_period));
353 write_aux_reg(ARC_REG_PCT_INT_CNTH, 381 write_aux_reg(ARC_REG_PCT_INT_CNTH,
354 (arc_pmu->max_period >> 32)); 382 upper_32_bits(arc_pmu->max_period));
355 } 383 }
356 384
357 write_aux_reg(ARC_REG_PCT_CONFIG, 0); 385 write_aux_reg(ARC_REG_PCT_CONFIG, 0);
@@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
392 idx = __ffs(active_ints); 420 idx = __ffs(active_ints);
393 421
394 /* Reset interrupt flag by writing of 1 */ 422 /* Reset interrupt flag by writing of 1 */
395 write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); 423 write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
396 424
397 /* 425 /*
398 * On reset of "interrupt active" bit corresponding 426 * On reset of "interrupt active" bit corresponding
@@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
400 * Now we need to re-enable interrupt for the counter. 428 * Now we need to re-enable interrupt for the counter.
401 */ 429 */
402 write_aux_reg(ARC_REG_PCT_INT_CTRL, 430 write_aux_reg(ARC_REG_PCT_INT_CTRL,
403 read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); 431 read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
404 432
405 event = pmu_cpu->act_counter[idx]; 433 event = pmu_cpu->act_counter[idx];
406 hwc = &event->hw; 434 hwc = &event->hw;
@@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
414 arc_pmu_stop(event, 0); 442 arc_pmu_stop(event, 0);
415 } 443 }
416 444
417 active_ints &= ~(1U << idx); 445 active_ints &= ~BIT(idx);
418 } while (active_ints); 446 } while (active_ints);
419 447
420done: 448done:
@@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data)
441 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); 469 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
442} 470}
443 471
472/* Event field occupies the bottom 15 bits of our config field */
473PMU_FORMAT_ATTR(event, "config:0-14");
474static struct attribute *arc_pmu_format_attrs[] = {
475 &format_attr_event.attr,
476 NULL,
477};
478
479static struct attribute_group arc_pmu_format_attr_gr = {
480 .name = "format",
481 .attrs = arc_pmu_format_attrs,
482};
483
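PMU_FORMAT_ATTR() is the stock helper from linux/perf_event.h; approximately, it expands to a read-only sysfs attribute whose show method echoes the format string, so the above publishes format/event containing "config:0-14" under the PMU's sysfs directory:

    /* approximate expansion of PMU_FORMAT_ATTR(event, "config:0-14") */
    static ssize_t event_show(struct device *dev,
                              struct device_attribute *attr, char *page)
    {
            return sprintf(page, "config:0-14\n");
    }
    static struct device_attribute format_attr_event = __ATTR_RO(event);
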
484static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
485 struct device_attribute *attr,
486 char *page)
487{
488 struct perf_pmu_events_attr *pmu_attr;
489
490 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
491 return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
492}
493
494/*
495 * We don't add attrs here as we don't have a pre-defined list of perf events.
496 * We will generate and add attrs dynamically in probe() after we read HW
497 * configuration.
498 */
499static struct attribute_group arc_pmu_events_attr_gr = {
500 .name = "events",
501};
502
503static void arc_pmu_add_raw_event_attr(int j, char *str)
504{
505 memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
506 arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
507 arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
508 arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
509 arc_pmu->attr[j].id = j;
510 arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
511}
512
513static int arc_pmu_raw_alloc(struct device *dev)
514{
515 arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
516 sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
517 if (!arc_pmu->attr)
518 return -ENOMEM;
519
520 arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
521 sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
522 if (!arc_pmu->attrs)
523 return -ENOMEM;
524
525 arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
526 sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
527 if (!arc_pmu->raw_entry)
528 return -ENOMEM;
529
530 return 0;
531}
532
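devm_kmalloc_array() with GFP_KERNEL | __GFP_ZERO is the open-coded form of devm_kcalloc(); an equivalent sketch of the first allocation above, shown only as a style alternative:

    arc_pmu->attr = devm_kcalloc(dev, arc_pmu->n_events + 1,
                                 sizeof(*arc_pmu->attr), GFP_KERNEL);
    if (!arc_pmu->attr)
            return -ENOMEM;
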
533static inline bool event_in_hw_event_map(int i, char *name)
534{
535 if (!arc_pmu_ev_hw_map[i])
536 return false;
537
538 if (!strlen(arc_pmu_ev_hw_map[i]))
539 return false;
540
541 if (strcmp(arc_pmu_ev_hw_map[i], name))
542 return false;
543
544 return true;
545}
546
547static void arc_pmu_map_hw_event(int j, char *str)
548{
549 int i;
550
551 /* See if HW condition has been mapped to a perf event_id */
552 for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
553 if (event_in_hw_event_map(i, str)) {
554 pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
555 i, str, j);
556 arc_pmu->ev_hw_idx[i] = j;
557 }
558 }
559}
560
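arc_pmu_ev_hw_map[] is defined in the ARC perf headers; the helpers above assume an array of condition-name strings indexed by generic perf event id, roughly of this shape (entries illustrative):

    /* sketch: generic perf event id -> ARC h/w condition name */
    static const char * const arc_pmu_ev_hw_map[] = {
            [PERF_COUNT_HW_CPU_CYCLES]   = "crun",
            [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
            /* unsupported events left NULL or "", hence the checks above */
    };
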
444static int arc_pmu_device_probe(struct platform_device *pdev) 561static int arc_pmu_device_probe(struct platform_device *pdev)
445{ 562{
446 struct arc_reg_pct_build pct_bcr; 563 struct arc_reg_pct_build pct_bcr;
447 struct arc_reg_cc_build cc_bcr; 564 struct arc_reg_cc_build cc_bcr;
448 int i, j, has_interrupts; 565 int i, has_interrupts;
449 int counter_size; /* in bits */ 566 int counter_size; /* in bits */
450 567
451 union cc_name { 568 union cc_name {
452 struct { 569 struct {
453 uint32_t word0, word1; 570 u32 word0, word1;
454 char sentinel; 571 char sentinel;
455 } indiv; 572 } indiv;
456 char str[9]; 573 char str[ARCPMU_EVENT_NAME_LEN];
457 } cc_name; 574 } cc_name;
458 575
459 576
@@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
463 return -ENODEV; 580 return -ENODEV;
464 } 581 }
465 BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32); 582 BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
466 BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS); 583 if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
584 return -EINVAL;
467 585
468 READ_BCR(ARC_REG_CC_BUILD, cc_bcr); 586 READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
469 BUG_ON(!cc_bcr.v); /* Counters exist but no countable conditions? */ 587 if (WARN(!cc_bcr.v, "Counters exist but no countable conditions?"))
588 return -EINVAL;
470 589
471 arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL); 590 arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
472 if (!arc_pmu) 591 if (!arc_pmu)
473 return -ENOMEM; 592 return -ENOMEM;
474 593
594 arc_pmu->n_events = cc_bcr.c;
595
596 if (arc_pmu_raw_alloc(&pdev->dev))
597 return -ENOMEM;
598
475 has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0; 599 has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
476 600
477 arc_pmu->n_counters = pct_bcr.c; 601 arc_pmu->n_counters = pct_bcr.c;
@@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
481 605
482 pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n", 606 pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
483 arc_pmu->n_counters, counter_size, cc_bcr.c, 607 arc_pmu->n_counters, counter_size, cc_bcr.c,
484 has_interrupts ? ", [overflow IRQ support]":""); 608 has_interrupts ? ", [overflow IRQ support]" : "");
485 609
486 cc_name.str[8] = 0; 610 cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
487 for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++) 611 for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
488 arc_pmu->ev_hw_idx[i] = -1; 612 arc_pmu->ev_hw_idx[i] = -1;
489 613
490 /* loop thru all available h/w condition indexes */ 614 /* loop thru all available h/w condition indexes */
491 for (j = 0; j < cc_bcr.c; j++) { 615 for (i = 0; i < cc_bcr.c; i++) {
492 write_aux_reg(ARC_REG_CC_INDEX, j); 616 write_aux_reg(ARC_REG_CC_INDEX, i);
493 cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); 617 cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
494 cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); 618 cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
495 619
496 /* See if it has been mapped to a perf event_id */ 620 arc_pmu_map_hw_event(i, cc_name.str);
497 for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) { 621 arc_pmu_add_raw_event_attr(i, cc_name.str);
498 if (arc_pmu_ev_hw_map[i] &&
499 !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
500 strlen(arc_pmu_ev_hw_map[i])) {
501 pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
502 i, cc_name.str, j);
503 arc_pmu->ev_hw_idx[i] = j;
504 }
505 }
506 } 622 }
507 623
624 arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
625 arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
626 arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
627
508 arc_pmu->pmu = (struct pmu) { 628 arc_pmu->pmu = (struct pmu) {
509 .pmu_enable = arc_pmu_enable, 629 .pmu_enable = arc_pmu_enable,
510 .pmu_disable = arc_pmu_disable, 630 .pmu_disable = arc_pmu_disable,
@@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
514 .start = arc_pmu_start, 634 .start = arc_pmu_start,
515 .stop = arc_pmu_stop, 635 .stop = arc_pmu_stop,
516 .read = arc_pmu_read, 636 .read = arc_pmu_read,
637 .attr_groups = arc_pmu->attr_groups,
517 }; 638 };
518 639
519 if (has_interrupts) { 640 if (has_interrupts) {
@@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
535 } else 656 } else
536 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; 657 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
537 658
538 return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW); 659 /*
660 * The perf parser doesn't really like the '-' symbol in event names, so
661 * use '_' in the arc pct name, as it becomes the kernel PMU event prefix.
662 */
663 return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
539} 664}
540 665
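With the PMU registered as "arc_pct" and the events/ and format/ groups wired up, raw conditions are addressable by name from the perf tool, e.g. arc_pct/event=0x12/ (event number illustrative). A hedged userspace sketch via perf_event_open(2); the PMU's dynamic type id must be read from /sys/bus/event_source/devices/arc_pct/type:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    /* sketch: open a counting fd for one raw ARC PCT condition */
    static int open_arc_pct_event(int pmu_type, unsigned long long cond)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.type   = pmu_type;      /* from the sysfs 'type' file */
            attr.size   = sizeof(attr);
            attr.config = cond;          /* condition index, config bits 0-14 */

            return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
    }
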
541#ifdef CONFIG_OF
542static const struct of_device_id arc_pmu_match[] = { 666static const struct of_device_id arc_pmu_match[] = {
543 { .compatible = "snps,arc700-pct" }, 667 { .compatible = "snps,arc700-pct" },
544 { .compatible = "snps,archs-pct" }, 668 { .compatible = "snps,archs-pct" },
545 {}, 669 {},
546}; 670};
547MODULE_DEVICE_TABLE(of, arc_pmu_match); 671MODULE_DEVICE_TABLE(of, arc_pmu_match);
548#endif
549 672
550static struct platform_driver arc_pmu_driver = { 673static struct platform_driver arc_pmu_driver = {
551 .driver = { 674 .driver = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 2e018b8c2e19..feb90093e6b1 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void)
123 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 123 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
124 const struct id_to_str *tbl; 124 const struct id_to_str *tbl;
125 struct bcr_isa_arcv2 isa; 125 struct bcr_isa_arcv2 isa;
126 struct bcr_actionpoint ap;
126 127
127 FIX_PTR(cpu); 128 FIX_PTR(cpu);
128 129
@@ -195,6 +196,7 @@ static void read_arc_build_cfg_regs(void)
195 cpu->bpu.full = bpu.ft; 196 cpu->bpu.full = bpu.ft;
196 cpu->bpu.num_cache = 256 << bpu.bce; 197 cpu->bpu.num_cache = 256 << bpu.bce;
197 cpu->bpu.num_pred = 2048 << bpu.pte; 198 cpu->bpu.num_pred = 2048 << bpu.pte;
199 cpu->bpu.ret_stk = 4 << bpu.rse;
198 200
199 if (cpu->core.family >= 0x54) { 201 if (cpu->core.family >= 0x54) {
200 unsigned int exec_ctrl; 202 unsigned int exec_ctrl;
@@ -207,8 +209,11 @@ static void read_arc_build_cfg_regs(void)
207 } 209 }
208 } 210 }
209 211
210 READ_BCR(ARC_REG_AP_BCR, bcr); 212 READ_BCR(ARC_REG_AP_BCR, ap);
211 cpu->extn.ap = bcr.ver ? 1 : 0; 213 if (ap.ver) {
214 cpu->extn.ap_num = 2 << ap.num;
215 cpu->extn.ap_full = !!ap.min;
216 }
212 217
213 READ_BCR(ARC_REG_SMART_BCR, bcr); 218 READ_BCR(ARC_REG_SMART_BCR, bcr);
214 cpu->extn.smart = bcr.ver ? 1 : 0; 219 cpu->extn.smart = bcr.ver ? 1 : 0;
@@ -216,8 +221,6 @@ static void read_arc_build_cfg_regs(void)
216 READ_BCR(ARC_REG_RTT_BCR, bcr); 221 READ_BCR(ARC_REG_RTT_BCR, bcr);
217 cpu->extn.rtt = bcr.ver ? 1 : 0; 222 cpu->extn.rtt = bcr.ver ? 1 : 0;
218 223
219 cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
220
221 READ_BCR(ARC_REG_ISA_CFG_BCR, isa); 224 READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
222 225
223 /* some hacks for lack of feature BCR info in old ARC700 cores */ 226 /* some hacks for lack of feature BCR info in old ARC700 cores */
@@ -299,10 +302,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
299 302
300 if (cpu->bpu.ver) 303 if (cpu->bpu.ver)
301 n += scnprintf(buf + n, len - n, 304 n += scnprintf(buf + n, len - n,
302 "BPU\t\t: %s%s match, cache:%d, Predict Table:%d", 305 "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
303 IS_AVAIL1(cpu->bpu.full, "full"), 306 IS_AVAIL1(cpu->bpu.full, "full"),
304 IS_AVAIL1(!cpu->bpu.full, "partial"), 307 IS_AVAIL1(!cpu->bpu.full, "partial"),
305 cpu->bpu.num_cache, cpu->bpu.num_pred); 308 cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
306 309
307 if (is_isa_arcv2()) { 310 if (is_isa_arcv2()) {
308 struct bcr_lpb lpb; 311 struct bcr_lpb lpb;
@@ -336,11 +339,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
336 IS_AVAIL1(cpu->extn.fpu_sp, "SP "), 339 IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
337 IS_AVAIL1(cpu->extn.fpu_dp, "DP ")); 340 IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
338 341
339 if (cpu->extn.debug) 342 if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
340 n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n", 343 n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
341 IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
342 IS_AVAIL1(cpu->extn.smart, "smaRT "), 344 IS_AVAIL1(cpu->extn.smart, "smaRT "),
343 IS_AVAIL1(cpu->extn.rtt, "RTT ")); 345 IS_AVAIL1(cpu->extn.rtt, "RTT "));
346 if (cpu->extn.ap_num) {
347 n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
348 cpu->extn.ap_num,
349 cpu->extn.ap_full ? "full":"min");
350 }
351 n += scnprintf(buf + n, len - n, "\n");
352 }
344 353
345 if (cpu->dccm.sz || cpu->iccm.sz) 354 if (cpu->dccm.sz || cpu->iccm.sz)
346 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n", 355 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
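The ap.num field is a log2-style encoding, so the 2 << ap.num added above yields the actionpoint count directly:

    /* sketch: ARC_REG_AP_BCR decode; ap.num = 0 -> 2, 1 -> 4, 2 -> 8 */
    unsigned int ap_num = 2 << ap.num;
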
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index e8d9fb452346..215f515442e0 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
18#include <asm/arcregs.h> 18#include <asm/arcregs.h>
19#include <asm/irqflags.h> 19#include <asm/irqflags.h>
20 20
21#define ARC_PATH_MAX 256
22
21/* 23/*
22 * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25) 24 * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
23 * -Prints 3 regs per line and a CR. 25 * -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
58 print_reg_file(&(cregs->r13), 13); 60 print_reg_file(&(cregs->r13), 13);
59} 61}
60 62
61static void print_task_path_n_nm(struct task_struct *tsk, char *buf) 63static void print_task_path_n_nm(struct task_struct *tsk)
62{ 64{
63 char *path_nm = NULL; 65 char *path_nm = NULL;
64 struct mm_struct *mm; 66 struct mm_struct *mm;
65 struct file *exe_file; 67 struct file *exe_file;
68 char buf[ARC_PATH_MAX];
66 69
67 mm = get_task_mm(tsk); 70 mm = get_task_mm(tsk);
68 if (!mm) 71 if (!mm)
@@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
72 mmput(mm); 75 mmput(mm);
73 76
74 if (exe_file) { 77 if (exe_file) {
75 path_nm = file_path(exe_file, buf, 255); 78 path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
76 fput(exe_file); 79 fput(exe_file);
77 } 80 }
78 81
@@ -80,10 +83,9 @@ done:
80 pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?"); 83 pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
81} 84}
82 85
83static void show_faulting_vma(unsigned long address, char *buf) 86static void show_faulting_vma(unsigned long address)
84{ 87{
85 struct vm_area_struct *vma; 88 struct vm_area_struct *vma;
86 char *nm = buf;
87 struct mm_struct *active_mm = current->active_mm; 89 struct mm_struct *active_mm = current->active_mm;
88 90
89 /* can't use print_vma_addr() yet as it doesn't check for 91 /* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
96 * if the container VMA is not found 98 * if the container VMA is not found
97 */ 99 */
98 if (vma && (vma->vm_start <= address)) { 100 if (vma && (vma->vm_start <= address)) {
101 char buf[ARC_PATH_MAX];
102 char *nm = "?";
103
99 if (vma->vm_file) { 104 if (vma->vm_file) {
100 nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1); 105 nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
101 if (IS_ERR(nm)) 106 if (IS_ERR(nm))
102 nm = "?"; 107 nm = "?";
103 } 108 }
@@ -173,13 +178,14 @@ void show_regs(struct pt_regs *regs)
173{ 178{
174 struct task_struct *tsk = current; 179 struct task_struct *tsk = current;
175 struct callee_regs *cregs; 180 struct callee_regs *cregs;
176 char *buf;
177 181
178 buf = (char *)__get_free_page(GFP_KERNEL); 182 /*
179 if (!buf) 183 * generic code calls us with preemption disabled, but some calls
180 return; 184 * here could sleep, so re-enable to avoid a lockdep splat
185 */
186 preempt_enable();
181 187
182 print_task_path_n_nm(tsk, buf); 188 print_task_path_n_nm(tsk);
183 show_regs_print_info(KERN_INFO); 189 show_regs_print_info(KERN_INFO);
184 190
185 show_ecr_verbose(regs); 191 show_ecr_verbose(regs);
@@ -189,7 +195,7 @@ void show_regs(struct pt_regs *regs)
189 (void *)regs->blink, (void *)regs->ret); 195 (void *)regs->blink, (void *)regs->ret);
190 196
191 if (user_mode(regs)) 197 if (user_mode(regs))
192 show_faulting_vma(regs->ret, buf); /* faulting code, not data */ 198 show_faulting_vma(regs->ret); /* faulting code, not data */
193 199
194 pr_info("[STAT32]: 0x%08lx", regs->status32); 200 pr_info("[STAT32]: 0x%08lx", regs->status32);
195 201
@@ -222,7 +228,7 @@ void show_regs(struct pt_regs *regs)
222 if (cregs) 228 if (cregs)
223 show_callee_regs(cregs); 229 show_callee_regs(cregs);
224 230
225 free_page((unsigned long)buf); 231 preempt_disable();
226} 232}
227 233
228void show_kernel_fault_diag(const char *str, struct pt_regs *regs, 234void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 62ad4bcb841a..f230bb7092fd 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -7,11 +7,39 @@
7 */ 7 */
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/cache.h>
10 11
11#undef PREALLOC_NOT_AVAIL 12/*
13 * The memset implementation below is optimized to use the prefetchw and
14 * prealloc instructions on a CPU with a 64B L1 data cache line
15 * (L1_CACHE_SHIFT == 6). To implement an optimized memset for other L1
16 * data cache line lengths (32B and 128B), rewrite the code carefully,
17 * checking that no prefetchw/prealloc instruction is issued for L1 cache
18 * lines that don't belong to the memset area.
19 */
20
21#if L1_CACHE_SHIFT == 6
22
23.macro PREALLOC_INSTR reg, off
24 prealloc [\reg, \off]
25.endm
26
27.macro PREFETCHW_INSTR reg, off
28 prefetchw [\reg, \off]
29.endm
30
31#else
32
33.macro PREALLOC_INSTR
34.endm
35
36.macro PREFETCHW_INSTR
37.endm
38
39#endif
12 40
13ENTRY_CFI(memset) 41ENTRY_CFI(memset)
14 prefetchw [r0] ; Prefetch the write location 42 PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
15 mov.f 0, r2 43 mov.f 0, r2
16;;; if size is zero 44;;; if size is zero
17 jz.d [blink] 45 jz.d [blink]
@@ -48,11 +76,8 @@ ENTRY_CFI(memset)
48 76
49 lpnz @.Lset64bytes 77 lpnz @.Lset64bytes
50 ;; LOOP START 78 ;; LOOP START
51#ifdef PREALLOC_NOT_AVAIL 79 PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
52 prefetchw [r3, 64] ;Prefetch the next write location 80
53#else
54 prealloc [r3, 64]
55#endif
56#ifdef CONFIG_ARC_HAS_LL64 81#ifdef CONFIG_ARC_HAS_LL64
57 std.ab r4, [r3, 8] 82 std.ab r4, [r3, 8]
58 std.ab r4, [r3, 8] 83 std.ab r4, [r3, 8]
@@ -85,7 +110,6 @@ ENTRY_CFI(memset)
85 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes 110 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
86 lpnz .Lset32bytes 111 lpnz .Lset32bytes
87 ;; LOOP START 112 ;; LOOP START
88 prefetchw [r3, 32] ;Prefetch the next write location
89#ifdef CONFIG_ARC_HAS_LL64 113#ifdef CONFIG_ARC_HAS_LL64
90 std.ab r4, [r3, 8] 114 std.ab r4, [r3, 8]
91 std.ab r4, [r3, 8] 115 std.ab r4, [r3, 8]
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index a1d723197084..8df1638259f3 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -141,12 +141,17 @@ good_area:
141 */ 141 */
142 fault = handle_mm_fault(vma, address, flags); 142 fault = handle_mm_fault(vma, address, flags);
143 143
144 /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
145 if (fatal_signal_pending(current)) { 144 if (fatal_signal_pending(current)) {
146 if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY)) 145
147 up_read(&mm->mmap_sem); 146 /*
148 if (user_mode(regs)) 147 * if fault retry, mmap_sem already relinquished by core mm
148 * so OK to return to user mode (with signal handled first)
149 */
150 if (fault & VM_FAULT_RETRY) {
151 if (!user_mode(regs))
152 goto no_context;
149 return; 153 return;
154 }
150 } 155 }
151 156
152 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 157 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 43bf4c3a1290..e1ab2d7f1d64 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -119,7 +119,8 @@ void __init setup_arch_memory(void)
119 */ 119 */
120 120
121 memblock_add_node(low_mem_start, low_mem_sz, 0); 121 memblock_add_node(low_mem_start, low_mem_sz, 0);
122 memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); 122 memblock_reserve(CONFIG_LINUX_LINK_BASE,
123 __pa(_end) - CONFIG_LINUX_LINK_BASE);
123 124
124#ifdef CONFIG_BLK_DEV_INITRD 125#ifdef CONFIG_BLK_DEV_INITRD
125 if (phys_initrd_size) { 126 if (phys_initrd_size) {
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index b3ef061d8b74..2c403e7c782d 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1 +1,95 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
3#define _ASM_ARM_XEN_PAGE_COHERENT_H
4
5#include <linux/dma-mapping.h>
6#include <asm/page.h>
1#include <xen/arm/page-coherent.h> 7#include <xen/arm/page-coherent.h>
8
9static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
10{
11 if (dev && dev->archdata.dev_dma_ops)
12 return dev->archdata.dev_dma_ops;
13 return get_arch_dma_ops(NULL);
14}
15
16static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
17 dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
18{
19 return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
20}
21
22static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
23 void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
24{
25 xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
26}
27
28static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
29 dma_addr_t dev_addr, unsigned long offset, size_t size,
30 enum dma_data_direction dir, unsigned long attrs)
31{
32 unsigned long page_pfn = page_to_xen_pfn(page);
33 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
34 unsigned long compound_pages =
35 (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
36 bool local = (page_pfn <= dev_pfn) &&
37 (dev_pfn - page_pfn < compound_pages);
38
39 /*
40 * Dom0 is mapped 1:1, and while the Linux page can span across
41 * multiple Xen pages, it's not possible for it to contain a
42 * mix of local and foreign Xen pages. So if the first xen_pfn
43 * == mfn, the page is local; otherwise it's a foreign page
44 * grant-mapped in dom0. If the page is local we can safely
45 * call the native dma_ops function, otherwise we call the xen
46 * specific function.
47 */
48 if (local)
49 xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
50 else
51 __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
52}
53
54static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
55 size_t size, enum dma_data_direction dir, unsigned long attrs)
56{
57 unsigned long pfn = PFN_DOWN(handle);
58 /*
59 * Dom0 is mapped 1:1, and while the Linux page can span across
60 * multiple Xen pages, it's not possible to have a mix of local and
61 * foreign Xen pages. Since dom0 is mapped 1:1, calling pfn_valid on a
62 * foreign mfn will always return false. If the page is local we can
63 * safely call the native dma_ops function, otherwise we call the xen
64 * specific function.
65 */
66 if (pfn_valid(pfn)) {
67 if (xen_get_dma_ops(hwdev)->unmap_page)
68 xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
69 } else
70 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
71}
72
73static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
74 dma_addr_t handle, size_t size, enum dma_data_direction dir)
75{
76 unsigned long pfn = PFN_DOWN(handle);
77 if (pfn_valid(pfn)) {
78 if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
79 xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
80 } else
81 __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
82}
83
84static inline void xen_dma_sync_single_for_device(struct device *hwdev,
85 dma_addr_t handle, size_t size, enum dma_data_direction dir)
86{
87 unsigned long pfn = PFN_DOWN(handle);
88 if (pfn_valid(pfn)) {
89 if (xen_get_dma_ops(hwdev)->sync_single_for_device)
90 xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
91 } else
92 __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
93}
94
95#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a..95a11d5b3587 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
83 } else /* remote PCI bus */ 83 } else /* remote PCI bus */
84 base = cnspci->cfg1_regs + ((busno & 0xf) << 20); 84 base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
85 85
86 return base + (where & 0xffc) + (devfn << 12); 86 return base + where + (devfn << 12);
87} 87}
88 88
89static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, 89static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
@@ -93,7 +93,7 @@ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
93 u32 mask = (0x1ull << (size * 8)) - 1; 93 u32 mask = (0x1ull << (size * 8)) - 1;
94 int shift = (where % 4) * 8; 94 int shift = (where % 4) * 8;
95 95
96 ret = pci_generic_config_read32(bus, devfn, where, size, val); 96 ret = pci_generic_config_read(bus, devfn, where, size, val);
97 97
98 if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn && 98 if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
99 (where & 0xffc) == PCI_CLASS_REVISION) 99 (where & 0xffc) == PCI_CLASS_REVISION)
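The mask/shift arithmetic in the surrounding context extracts a size-byte field from a 32-bit dword; a standalone sketch of that computation (extract_cfg() is illustrative, not part of the driver):

    /* sketch: pull a 1/2/4-byte config value at offset 'where' out of a dword */
    static u32 extract_cfg(u32 dword, int where, int size)
    {
            u32 mask  = (0x1ull << (size * 8)) - 1;  /* size=2 -> 0xffff */
            int shift = (where % 4) * 8;             /* byte offset in the dword */

            return (dword >> shift) & mask;
    }
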
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 3dd3d664c5c5..4658c937e173 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -20,9 +20,6 @@ struct dev_archdata {
20#ifdef CONFIG_IOMMU_API 20#ifdef CONFIG_IOMMU_API
21 void *iommu; /* private IOMMU data */ 21 void *iommu; /* private IOMMU data */
22#endif 22#endif
23#ifdef CONFIG_XEN
24 const struct dma_map_ops *dev_dma_ops;
25#endif
26}; 23};
27 24
28struct pdev_archdata { 25struct pdev_archdata {
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index b3ef061d8b74..d88e56b90b93 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1 +1,77 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
3#define _ASM_ARM64_XEN_PAGE_COHERENT_H
4
5#include <linux/dma-mapping.h>
6#include <asm/page.h>
1#include <xen/arm/page-coherent.h> 7#include <xen/arm/page-coherent.h>
8
9static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
10 dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
11{
12 return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
13}
14
15static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
16 void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
17{
18 dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
19}
20
21static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
22 dma_addr_t handle, size_t size, enum dma_data_direction dir)
23{
24 unsigned long pfn = PFN_DOWN(handle);
25
26 if (pfn_valid(pfn))
27 dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
28 else
29 __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
30}
31
32static inline void xen_dma_sync_single_for_device(struct device *hwdev,
33 dma_addr_t handle, size_t size, enum dma_data_direction dir)
34{
35 unsigned long pfn = PFN_DOWN(handle);
36 if (pfn_valid(pfn))
37 dma_direct_sync_single_for_device(hwdev, handle, size, dir);
38 else
39 __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
40}
41
42static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
43 dma_addr_t dev_addr, unsigned long offset, size_t size,
44 enum dma_data_direction dir, unsigned long attrs)
45{
46 unsigned long page_pfn = page_to_xen_pfn(page);
47 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
48 unsigned long compound_pages =
49 (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
50 bool local = (page_pfn <= dev_pfn) &&
51 (dev_pfn - page_pfn < compound_pages);
52
53 if (local)
54 dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
55 else
56 __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
57}
58
59static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
60 size_t size, enum dma_data_direction dir, unsigned long attrs)
61{
62 unsigned long pfn = PFN_DOWN(handle);
63 /*
64 * Dom0 is mapped 1:1, and while the Linux page can span across
65 * multiple Xen pages, it's not possible to have a mix of local and
66 * foreign Xen pages. Since dom0 is mapped 1:1, calling pfn_valid on a
67 * foreign mfn will always return false. If the page is local we can
68 * safely call the native dma_ops function, otherwise we call the xen
69 * specific function.
70 */
71 if (pfn_valid(pfn))
72 dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
73 else
74 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
75}
76
77#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 29cdc99688f3..9859e1178e6b 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
299 dcache_clean_range(__idmap_text_start, __idmap_text_end); 299 dcache_clean_range(__idmap_text_start, __idmap_text_end);
300 300
301 /* Clean kvm setup code to PoC? */ 301 /* Clean kvm setup code to PoC? */
302 if (el2_reset_needed()) 302 if (el2_reset_needed()) {
303 dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); 303 dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
304 dcache_clean_range(__hyp_text_start, __hyp_text_end);
305 }
304 306
305 /* make the crash dump kernel image protected again */ 307 /* make the crash dump kernel image protected again */
306 crash_post_resume(); 308 crash_post_resume();
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fbaa374..17f325ba831e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
28#include <asm/virt.h> 28#include <asm/virt.h>
29 29
30 .text 30 .text
31 .pushsection .hyp.text, "ax"
32
31 .align 11 33 .align 11
32 34
33ENTRY(__hyp_stub_vectors) 35ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index ba6b41790fcd..b09b6f75f759 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
88 * we end up running with module randomization disabled. 88 * we end up running with module randomization disabled.
89 */ 89 */
90 module_alloc_base = (u64)_etext - MODULES_VSIZE; 90 module_alloc_base = (u64)_etext - MODULES_VSIZE;
91 __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
91 92
92 /* 93 /*
93 * Try to map the FDT early. If this fails, we simply bail, 94 * Try to map the FDT early. If this fails, we simply bail,
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a5b338b2542..f17afb99890c 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
478 addr < (unsigned long)__entry_text_end) || 478 addr < (unsigned long)__entry_text_end) ||
479 (addr >= (unsigned long)__idmap_text_start && 479 (addr >= (unsigned long)__idmap_text_start &&
480 addr < (unsigned long)__idmap_text_end) || 480 addr < (unsigned long)__idmap_text_end) ||
481 (addr >= (unsigned long)__hyp_text_start &&
482 addr < (unsigned long)__hyp_text_end) ||
481 !!search_exception_tables(addr)) 483 !!search_exception_tables(addr))
482 return true; 484 return true;
483 485
484 if (!is_kernel_in_hyp_mode()) { 486 if (!is_kernel_in_hyp_mode()) {
485 if ((addr >= (unsigned long)__hyp_text_start && 487 if ((addr >= (unsigned long)__hyp_idmap_text_start &&
486 addr < (unsigned long)__hyp_text_end) ||
487 (addr >= (unsigned long)__hyp_idmap_text_start &&
488 addr < (unsigned long)__hyp_idmap_text_end)) 488 addr < (unsigned long)__hyp_idmap_text_end))
489 return true; 489 return true;
490 } 490 }
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index fb0908456a1f..78c0a72f822c 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -466,9 +466,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
466 __iommu_setup_dma_ops(dev, dma_base, size, iommu); 466 __iommu_setup_dma_ops(dev, dma_base, size, iommu);
467 467
468#ifdef CONFIG_XEN 468#ifdef CONFIG_XEN
469 if (xen_initial_domain()) { 469 if (xen_initial_domain())
470 dev->archdata.dev_dma_ops = dev->dma_ops;
471 dev->dma_ops = xen_dma_ops; 470 dev->dma_ops = xen_dma_ops;
472 }
473#endif 471#endif
474} 472}
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 30695a868107..5c9073bace83 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
33 __clean_dcache_area_pou(kaddr, len); 33 __clean_dcache_area_pou(kaddr, len);
34 __flush_icache_all(); 34 __flush_icache_all();
35 } else { 35 } else {
36 flush_icache_range(addr, addr + len); 36 /*
37 * Don't issue kick_all_cpus_sync() after I-cache invalidation
38 * for user mappings.
39 */
40 __flush_icache_range(addr, addr + len);
37 } 41 }
38} 42}
39 43
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 33a2c94fed0d..63b4a1705182 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += pgalloc.h
30generic-y += preempt.h 30generic-y += preempt.h
31generic-y += segment.h 31generic-y += segment.h
32generic-y += serial.h 32generic-y += serial.h
33generic-y += shmparam.h
33generic-y += tlbflush.h 34generic-y += tlbflush.h
34generic-y += topology.h 35generic-y += topology.h
35generic-y += trace_clock.h 36generic-y += trace_clock.h
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index 6c6f6301012e..0febf1a07c30 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
5generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index cd400d353d18..961c1dc064e1 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -40,6 +40,7 @@ generic-y += preempt.h
40generic-y += scatterlist.h 40generic-y += scatterlist.h
41generic-y += sections.h 41generic-y += sections.h
42generic-y += serial.h 42generic-y += serial.h
43generic-y += shmparam.h
43generic-y += sizes.h 44generic-y += sizes.h
44generic-y += spinlock.h 45generic-y += spinlock.h
45generic-y += timex.h 46generic-y += timex.h
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index 6c6f6301012e..0febf1a07c30 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
5generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 47c4da3d64a4..b25fd42aa0f4 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += rwsem.h
30generic-y += sections.h 30generic-y += sections.h
31generic-y += segment.h 31generic-y += segment.h
32generic-y += serial.h 32generic-y += serial.h
33generic-y += shmparam.h
33generic-y += sizes.h 34generic-y += sizes.h
34generic-y += topology.h 35generic-y += topology.h
35generic-y += trace_clock.h 36generic-y += trace_clock.h
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index 61d955c1747a..c1b06dcf6cf8 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -1,4 +1,3 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += shmparam.h
4generic-y += ucontext.h 3generic-y += ucontext.h
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 9f1dd26903e3..95f8f631c4df 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -20,6 +20,7 @@ generic-y += mm-arch-hooks.h
20generic-y += percpu.h 20generic-y += percpu.h
21generic-y += preempt.h 21generic-y += preempt.h
22generic-y += sections.h 22generic-y += sections.h
23generic-y += shmparam.h
23generic-y += spinlock.h 24generic-y += spinlock.h
24generic-y += topology.h 25generic-y += topology.h
25generic-y += trace_clock.h 26generic-y += trace_clock.h
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index b8b3525271fa..960bf1e4be53 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -2,4 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm
2 2
3generated-y += unistd_32.h 3generated-y += unistd_32.h
4generic-y += kvm_para.h 4generic-y += kvm_para.h
5generic-y += shmparam.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 9c7d1d25bf3d..791cc8d54d0a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += parport.h
26generic-y += percpu.h 26generic-y += percpu.h
27generic-y += preempt.h 27generic-y += preempt.h
28generic-y += serial.h 28generic-y += serial.h
29generic-y += shmparam.h
29generic-y += syscalls.h 30generic-y += syscalls.h
30generic-y += topology.h 31generic-y += topology.h
31generic-y += trace_clock.h 32generic-y += trace_clock.h
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 28823e3db825..97823ec46e97 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -2,5 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm
2 2
3generated-y += unistd_32.h 3generated-y += unistd_32.h
4generic-y += kvm_para.h 4generic-y += kvm_para.h
5generic-y += shmparam.h
6generic-y += ucontext.h 5generic-y += ucontext.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index eb87cd8327c8..1f04844b6b82 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -34,6 +34,7 @@ generic-y += qrwlock_types.h
34generic-y += qrwlock.h 34generic-y += qrwlock.h
35generic-y += sections.h 35generic-y += sections.h
36generic-y += segment.h 36generic-y += segment.h
37generic-y += shmparam.h
37generic-y += string.h 38generic-y += string.h
38generic-y += switch_to.h 39generic-y += switch_to.h
39generic-y += topology.h 40generic-y += topology.h
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index 6c6f6301012e..0febf1a07c30 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
5generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index feeeaa60697c..515fc3cc9687 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -103,7 +103,7 @@ choice
103 prompt "Base ISA" 103 prompt "Base ISA"
104 default ARCH_RV64I 104 default ARCH_RV64I
105 help 105 help
106 This selects the base ISA that this kernel will traget and must match 106 This selects the base ISA that this kernel will target and must match
107 the target platform. 107 the target platform.
108 108
109config ARCH_RV32I 109config ARCH_RV32I
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index f399659d3b8d..2fd3461e50ab 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -13,8 +13,6 @@ CONFIG_BLK_DEV_INITRD=y
13CONFIG_EXPERT=y 13CONFIG_EXPERT=y
14CONFIG_BPF_SYSCALL=y 14CONFIG_BPF_SYSCALL=y
15CONFIG_SMP=y 15CONFIG_SMP=y
16CONFIG_PCI=y
17CONFIG_PCIE_XILINX=y
18CONFIG_MODULES=y 16CONFIG_MODULES=y
19CONFIG_MODULE_UNLOAD=y 17CONFIG_MODULE_UNLOAD=y
20CONFIG_NET=y 18CONFIG_NET=y
@@ -28,6 +26,10 @@ CONFIG_IP_PNP_DHCP=y
28CONFIG_IP_PNP_BOOTP=y 26CONFIG_IP_PNP_BOOTP=y
29CONFIG_IP_PNP_RARP=y 27CONFIG_IP_PNP_RARP=y
30CONFIG_NETLINK_DIAG=y 28CONFIG_NETLINK_DIAG=y
29CONFIG_PCI=y
30CONFIG_PCIEPORTBUS=y
31CONFIG_PCI_HOST_GENERIC=y
32CONFIG_PCIE_XILINX=y
31CONFIG_DEVTMPFS=y 33CONFIG_DEVTMPFS=y
32CONFIG_BLK_DEV_LOOP=y 34CONFIG_BLK_DEV_LOOP=y
33CONFIG_VIRTIO_BLK=y 35CONFIG_VIRTIO_BLK=y
@@ -63,7 +65,6 @@ CONFIG_USB_STORAGE=y
63CONFIG_USB_UAS=y 65CONFIG_USB_UAS=y
64CONFIG_VIRTIO_MMIO=y 66CONFIG_VIRTIO_MMIO=y
65CONFIG_SIFIVE_PLIC=y 67CONFIG_SIFIVE_PLIC=y
66CONFIG_RAS=y
67CONFIG_EXT4_FS=y 68CONFIG_EXT4_FS=y
68CONFIG_EXT4_FS_POSIX_ACL=y 69CONFIG_EXT4_FS_POSIX_ACL=y
69CONFIG_AUTOFS4_FS=y 70CONFIG_AUTOFS4_FS=y
@@ -77,5 +78,6 @@ CONFIG_NFS_V4_1=y
77CONFIG_NFS_V4_2=y 78CONFIG_NFS_V4_2=y
78CONFIG_ROOT_NFS=y 79CONFIG_ROOT_NFS=y
79CONFIG_CRYPTO_USER_API_HASH=y 80CONFIG_CRYPTO_USER_API_HASH=y
81CONFIG_CRYPTO_DEV_VIRTIO=y
80CONFIG_PRINTK_TIME=y 82CONFIG_PRINTK_TIME=y
81# CONFIG_RCU_TRACE is not set 83# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 06cfbb3aacbb..2a546a52f02a 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -80,7 +80,7 @@ typedef struct page *pgtable_t;
80#define __pgd(x) ((pgd_t) { (x) }) 80#define __pgd(x) ((pgd_t) { (x) })
81#define __pgprot(x) ((pgprot_t) { (x) }) 81#define __pgprot(x) ((pgprot_t) { (x) })
82 82
83#ifdef CONFIG_64BITS 83#ifdef CONFIG_64BIT
84#define PTE_FMT "%016lx" 84#define PTE_FMT "%016lx"
85#else 85#else
86#define PTE_FMT "%08lx" 86#define PTE_FMT "%08lx"
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 0531f49af5c3..ce70bceb8872 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -22,7 +22,7 @@
22 * This decides where the kernel will search for a free chunk of vm 22 * This decides where the kernel will search for a free chunk of vm
23 * space during mmap's. 23 * space during mmap's.
24 */ 24 */
25#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1) 25#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
26 26
27#define STACK_TOP TASK_SIZE 27#define STACK_TOP TASK_SIZE
28#define STACK_TOP_MAX STACK_TOP 28#define STACK_TOP_MAX STACK_TOP
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 6a92a2fe198e..dac98348c6a3 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ void asm_offsets(void)
39 OFFSET(TASK_STACK, task_struct, stack); 39 OFFSET(TASK_STACK, task_struct, stack);
40 OFFSET(TASK_TI, task_struct, thread_info); 40 OFFSET(TASK_TI, task_struct, thread_info);
41 OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); 41 OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
42 OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
42 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); 43 OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
43 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); 44 OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
44 OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); 45 OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 355166f57205..fd9b57c8b4ce 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -144,6 +144,10 @@ _save_context:
144 REG_L x2, PT_SP(sp) 144 REG_L x2, PT_SP(sp)
145 .endm 145 .endm
146 146
147#if !IS_ENABLED(CONFIG_PREEMPT)
148.set resume_kernel, restore_all
149#endif
150
147ENTRY(handle_exception) 151ENTRY(handle_exception)
148 SAVE_ALL 152 SAVE_ALL
149 153
@@ -228,7 +232,7 @@ ret_from_exception:
228 REG_L s0, PT_SSTATUS(sp) 232 REG_L s0, PT_SSTATUS(sp)
229 csrc sstatus, SR_SIE 233 csrc sstatus, SR_SIE
230 andi s0, s0, SR_SPP 234 andi s0, s0, SR_SPP
231 bnez s0, restore_all 235 bnez s0, resume_kernel
232 236
233resume_userspace: 237resume_userspace:
234 /* Interrupts must be disabled here so flags are checked atomically */ 238 /* Interrupts must be disabled here so flags are checked atomically */
@@ -250,6 +254,18 @@ restore_all:
250 RESTORE_ALL 254 RESTORE_ALL
251 sret 255 sret
252 256
257#if IS_ENABLED(CONFIG_PREEMPT)
258resume_kernel:
259 REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
260 bnez s0, restore_all
261need_resched:
262 REG_L s0, TASK_TI_FLAGS(tp)
263 andi s0, s0, _TIF_NEED_RESCHED
264 beqz s0, restore_all
265 call preempt_schedule_irq
266 j need_resched
267#endif
268
253work_pending: 269work_pending:
254 /* Enter slow path for supplementary processing */ 270 /* Enter slow path for supplementary processing */
255 la ra, ret_from_exception 271 la ra, ret_from_exception
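In C terms, the new resume_kernel path is the standard kernel-preemption check on exception return; a hedged equivalent of what the assembly above implements:

    /* sketch: kernel-mode exception return with CONFIG_PREEMPT */
    if (!preempt_count()) {
            while (test_thread_flag(TIF_NEED_RESCHED))
                    preempt_schedule_irq();  /* entered with irqs disabled */
    }
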
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 6e079e94b638..77564310235f 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -181,7 +181,7 @@ static void __init setup_bootmem(void)
181 BUG_ON(mem_size == 0); 181 BUG_ON(mem_size == 0);
182 182
183 set_max_mapnr(PFN_DOWN(mem_size)); 183 set_max_mapnr(PFN_DOWN(mem_size));
184 max_low_pfn = memblock_end_of_DRAM(); 184 max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
185 185
186#ifdef CONFIG_BLK_DEV_INITRD 186#ifdef CONFIG_BLK_DEV_INITRD
187 setup_initrd(); 187 setup_initrd();
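memblock_end_of_DRAM() returns a physical address in bytes while max_low_pfn is a page frame number, hence the added PFN_DOWN() (a plain PAGE_SHIFT shift, per include/linux/pfn.h):

    /* sketch: bytes -> PFN, assuming 4 KiB pages (PAGE_SHIFT == 12) */
    max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
    /* e.g. DRAM ending at 0x88000000 -> max_low_pfn = 0x88000 */
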
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index fc185ecabb0a..18cda0e8cf94 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -57,15 +57,12 @@ void __init setup_smp(void)
57 57
58 while ((dn = of_find_node_by_type(dn, "cpu"))) { 58 while ((dn = of_find_node_by_type(dn, "cpu"))) {
59 hart = riscv_of_processor_hartid(dn); 59 hart = riscv_of_processor_hartid(dn);
60 if (hart < 0) { 60 if (hart < 0)
61 of_node_put(dn);
62 continue; 61 continue;
63 }
64 62
65 if (hart == cpuid_to_hartid_map(0)) { 63 if (hart == cpuid_to_hartid_map(0)) {
66 BUG_ON(found_boot_cpu); 64 BUG_ON(found_boot_cpu);
67 found_boot_cpu = 1; 65 found_boot_cpu = 1;
68 of_node_put(dn);
69 continue; 66 continue;
70 } 67 }
71 68
@@ -73,7 +70,6 @@ void __init setup_smp(void)
73 set_cpu_possible(cpuid, true); 70 set_cpu_possible(cpuid, true);
74 set_cpu_present(cpuid, true); 71 set_cpu_present(cpuid, true);
75 cpuid++; 72 cpuid++;
76 of_node_put(dn);
77 } 73 }
78 74
79 BUG_ON(!found_boot_cpu); 75 BUG_ON(!found_boot_cpu);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 1d9bfaff60bc..658ebf645f42 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -28,7 +28,8 @@ static void __init zone_sizes_init(void)
28 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; 28 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
29 29
30#ifdef CONFIG_ZONE_DMA32 30#ifdef CONFIG_ZONE_DMA32
31 max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); 31 max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
32 (unsigned long) PFN_PHYS(max_low_pfn)));
32#endif 33#endif
33 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 34 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
34 35
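Here the unit bug was the mirror image: max_low_pfn is a PFN, so it has to go back to a byte address via PFN_PHYS() before being compared against 4 GiB. A worked example, assuming 4 KiB pages and RAM starting at physical 0:

    /* 2 GiB of RAM           -> max_low_pfn = 0x80000 (PFNs)            */
    /* PFN_PHYS(0x80000)      = 0x80000000 (bytes)                       */
    /* min(4UL * SZ_1G, that) = 0x80000000                               */
    /* PFN_DOWN(0x80000000)   = 0x80000 -> DMA32 zone covers all of RAM  */
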
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index ccbb53e22024..8d04e6f3f796 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ static inline int init_new_context(struct task_struct *tsk,
25 atomic_set(&mm->context.flush_count, 0); 25 atomic_set(&mm->context.flush_count, 0);
26 mm->context.gmap_asce = 0; 26 mm->context.gmap_asce = 0;
27 mm->context.flush_mm = 0; 27 mm->context.flush_mm = 0;
28 mm->context.compat_mm = 0; 28 mm->context.compat_mm = test_thread_flag(TIF_31BIT);
29#ifdef CONFIG_PGSTE 29#ifdef CONFIG_PGSTE
30 mm->context.alloc_pgste = page_table_allocate_pgste || 30 mm->context.alloc_pgste = page_table_allocate_pgste ||
31 test_thread_flag(TIF_PGSTE) || 31 test_thread_flag(TIF_PGSTE) ||
@@ -90,8 +90,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
90{ 90{
91 int cpu = smp_processor_id(); 91 int cpu = smp_processor_id();
92 92
93 if (prev == next)
94 return;
95 S390_lowcore.user_asce = next->context.asce; 93 S390_lowcore.user_asce = next->context.asce;
96 cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); 94 cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
97 /* Clear previous user-ASCE from CR1 and CR7 */ 95 /* Clear previous user-ASCE from CR1 and CR7 */
@@ -103,7 +101,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
103 __ctl_load(S390_lowcore.vdso_asce, 7, 7); 101 __ctl_load(S390_lowcore.vdso_asce, 7, 7);
104 clear_cpu_flag(CIF_ASCE_SECONDARY); 102 clear_cpu_flag(CIF_ASCE_SECONDARY);
105 } 103 }
106 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); 104 if (prev != next)
105 cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
107} 106}
108 107
109#define finish_arch_post_lock_switch finish_arch_post_lock_switch 108#define finish_arch_post_lock_switch finish_arch_post_lock_switch
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index af5c2b3f7065..a8c7789b246b 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -63,10 +63,10 @@ static noinline __init void detect_machine_type(void)
63 if (stsi(vmms, 3, 2, 2) || !vmms->count) 63 if (stsi(vmms, 3, 2, 2) || !vmms->count)
64 return; 64 return;
65 65
66 /* Running under KVM? If not we assume z/VM */ 66 /* Detect known hypervisors */
67 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) 67 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
68 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; 68 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
69 else 69 else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
70 S390_lowcore.machine_flags |= MACHINE_FLAG_VM; 70 S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
71} 71}
72 72
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 72dd23ef771b..7ed90a759135 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1006,6 +1006,8 @@ void __init setup_arch(char **cmdline_p)
1006 pr_info("Linux is running under KVM in 64-bit mode\n"); 1006 pr_info("Linux is running under KVM in 64-bit mode\n");
1007 else if (MACHINE_IS_LPAR) 1007 else if (MACHINE_IS_LPAR)
1008 pr_info("Linux is running natively in 64-bit mode\n"); 1008 pr_info("Linux is running natively in 64-bit mode\n");
1009 else
1010 pr_info("Linux is running as a guest in 64-bit mode\n");
1009 1011
1010 /* Have one command line that is parsed and saved in /proc/cmdline */ 1012 /* Have one command line that is parsed and saved in /proc/cmdline */
1011 /* boot_command_line has been already set up in early.c */ 1013 /* boot_command_line has been already set up in early.c */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f82b3d3c36e2..b198ece2aad6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -381,8 +381,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
381 */ 381 */
382void smp_call_ipl_cpu(void (*func)(void *), void *data) 382void smp_call_ipl_cpu(void (*func)(void *), void *data)
383{ 383{
384 struct lowcore *lc = pcpu_devices->lowcore;
385
386 if (pcpu_devices[0].address == stap())
387 lc = &S390_lowcore;
388
384 pcpu_delegate(&pcpu_devices[0], func, data, 389 pcpu_delegate(&pcpu_devices[0], func, data,
385 pcpu_devices->lowcore->nodat_stack); 390 lc->nodat_stack);
386} 391}
387 392
388int smp_find_processor_id(u16 address) 393int smp_find_processor_id(u16 address)
@@ -1166,7 +1171,11 @@ static ssize_t __ref rescan_store(struct device *dev,
1166{ 1171{
1167 int rc; 1172 int rc;
1168 1173
1174 rc = lock_device_hotplug_sysfs();
1175 if (rc)
1176 return rc;
1169 rc = smp_rescan_cpus(); 1177 rc = smp_rescan_cpus();
1178 unlock_device_hotplug();
1170 return rc ? rc : count; 1179 return rc ? rc : count;
1171} 1180}
1172static DEVICE_ATTR_WO(rescan); 1181static DEVICE_ATTR_WO(rescan);
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index ebe748a9f472..4ff354887db4 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -224,10 +224,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
224 224
225 vdso_pages = vdso64_pages; 225 vdso_pages = vdso64_pages;
226#ifdef CONFIG_COMPAT 226#ifdef CONFIG_COMPAT
227 if (is_compat_task()) { 227 mm->context.compat_mm = is_compat_task();
228 if (mm->context.compat_mm)
228 vdso_pages = vdso32_pages; 229 vdso_pages = vdso32_pages;
229 mm->context.compat_mm = 1;
230 }
231#endif 230#endif
232 /* 231 /*
233 * vDSO has a problem and was disabled, just don't "enable" it for 232 * vDSO has a problem and was disabled, just don't "enable" it for
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1372553dc0a9..1d1544b6ca74 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += preempt.h
28generic-y += sections.h 28generic-y += sections.h
29generic-y += segment.h 29generic-y += segment.h
30generic-y += serial.h 30generic-y += serial.h
31generic-y += shmparam.h
31generic-y += sizes.h 32generic-y += sizes.h
32generic-y += syscalls.h 33generic-y += syscalls.h
33generic-y += topology.h 34generic-y += topology.h
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 6c6f6301012e..0febf1a07c30 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
5generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4b4a7f32b68e..68261430fe6e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -198,7 +198,7 @@ config X86
198 select IRQ_FORCED_THREADING 198 select IRQ_FORCED_THREADING
199 select NEED_SG_DMA_LENGTH 199 select NEED_SG_DMA_LENGTH
200 select PCI_DOMAINS if PCI 200 select PCI_DOMAINS if PCI
201 select PCI_LOCKLESS_CONFIG 201 select PCI_LOCKLESS_CONFIG if PCI
202 select PERF_EVENTS 202 select PERF_EVENTS
203 select RTC_LIB 203 select RTC_LIB
204 select RTC_MC146818_LIB 204 select RTC_MC146818_LIB
@@ -446,12 +446,12 @@ config RETPOLINE
446 branches. Requires a compiler with -mindirect-branch=thunk-extern 446 branches. Requires a compiler with -mindirect-branch=thunk-extern
447 support for full protection. The kernel may run slower. 447 support for full protection. The kernel may run slower.
448 448
449config X86_RESCTRL 449config X86_CPU_RESCTRL
450 bool "Resource Control support" 450 bool "x86 CPU resource control support"
451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) 451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
452 select KERNFS 452 select KERNFS
453 help 453 help
454 Enable Resource Control support. 454 Enable x86 CPU resource control support.
455 455
456 Provide support for the allocation and monitoring of system resources 456 Provide support for the allocation and monitoring of system resources
457 usage by the CPU. 457 usage by the CPU.
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 64037895b085..f105ae8651c9 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
600 leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax 600 leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
601 movl %eax, %cr3 601 movl %eax, %cr3
6023: 6023:
603 /* Set EFER.LME=1 as a precaution in case the hypervisor pulls the rug */
604 pushl %ecx
605 movl $MSR_EFER, %ecx
606 rdmsr
607 btsl $_EFER_LME, %eax
608 wrmsr
609 popl %ecx
610
603 /* Enable PAE and LA57 (if required) paging modes */ 611 /* Enable PAE and LA57 (if required) paging modes */
604 movl $X86_CR4_PAE, %eax 612 movl $X86_CR4_PAE, %eax
605 cmpl $0, %edx 613 cmpl $0, %edx
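The added assembly is a plain read-modify-write of MSR_EFER; expressed with the kernel's C MSR helpers for illustration only (the trampoline runs in 32-bit mode where only the asm form is usable):

    #include <asm/msr.h>

    u64 efer;

    rdmsrl(MSR_EFER, efer);
    efer |= EFER_LME;        /* long mode enable, bit 8 */
    wrmsrl(MSR_EFER, efer);
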
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 91f75638f6e6..6ff7e81b5628 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -6,7 +6,7 @@
6#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 6#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
7 7
8#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE 8#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
9#define TRAMPOLINE_32BIT_CODE_SIZE 0x60 9#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
10 10
11#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE 11#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
12 12
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 8eaf8952c408..39913770a44d 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)
361 361
362 /* Need to switch before accessing the thread stack. */ 362 /* Need to switch before accessing the thread stack. */
363 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi 363 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
364 movq %rsp, %rdi 364 /* In the Xen PV case we already run on the thread stack. */
365 ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
365 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp 366 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
366 367
367 pushq 6*8(%rdi) /* regs->ss */ 368 pushq 6*8(%rdi) /* regs->ss */
@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
370 pushq 3*8(%rdi) /* regs->cs */ 371 pushq 3*8(%rdi) /* regs->cs */
371 pushq 2*8(%rdi) /* regs->ip */ 372 pushq 2*8(%rdi) /* regs->ip */
372 pushq 1*8(%rdi) /* regs->orig_ax */ 373 pushq 1*8(%rdi) /* regs->orig_ax */
373
374 pushq (%rdi) /* pt_regs->di */ 374 pushq (%rdi) /* pt_regs->di */
375.Lint80_keep_stack:
376
375 pushq %rsi /* pt_regs->si */ 377 pushq %rsi /* pt_regs->si */
376 xorl %esi, %esi /* nospec si */ 378 xorl %esi, %esi /* nospec si */
377 pushq %rdx /* pt_regs->dx */ 379 pushq %rdx /* pt_regs->dx */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0dd6b0f4000e..d9a9993af882 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -6,7 +6,7 @@
6 * "Big Core" Processors (Branded as Core, Xeon, etc...) 6 * "Big Core" Processors (Branded as Core, Xeon, etc...)
7 * 7 *
8 * The "_X" parts are generally the EP and EX Xeons, or the 8 * The "_X" parts are generally the EP and EX Xeons, or the
9 * "Extreme" ones, like Broadwell-E. 9 * "Extreme" ones, like Broadwell-E, or Atom microservers.
10 * 10 *
11 * While adding a new CPUID for a new microarchitecture, add a new 11 * While adding a new CPUID for a new microarchitecture, add a new
12 * group to keep logically sorted out in chronological order. Within 12 * group to keep logically sorted out in chronological order. Within
@@ -71,6 +71,7 @@
71#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ 71#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
72#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ 72#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
73#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ 73#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
74#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
74 75
75/* Xeon Phi */ 76/* Xeon Phi */
76 77
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 0ca50611e8ce..19d18fae6ec6 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
178 178
179void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); 179void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
180 180
181/*
182 * Init a new mm. Used on mm copies, like at fork()
183 * and on mm's that are brand-new, like at execve().
184 */
181static inline int init_new_context(struct task_struct *tsk, 185static inline int init_new_context(struct task_struct *tsk,
182 struct mm_struct *mm) 186 struct mm_struct *mm)
183{ 187{
@@ -228,8 +232,22 @@ do { \
228} while (0) 232} while (0)
229#endif 233#endif
230 234
235static inline void arch_dup_pkeys(struct mm_struct *oldmm,
236 struct mm_struct *mm)
237{
238#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
239 if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
240 return;
241
242 /* Duplicate the oldmm pkey state in mm: */
243 mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
244 mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
245#endif
246}
247
231static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) 248static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
232{ 249{
250 arch_dup_pkeys(oldmm, mm);
233 paravirt_arch_dup_mmap(oldmm, mm); 251 paravirt_arch_dup_mmap(oldmm, mm);
234 return ldt_dup_context(oldmm, mm); 252 return ldt_dup_context(oldmm, mm);
235} 253}
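
The mmu_context.h hunk above closes a fork() hole: arch_dup_mmap() duplicated the LDT but not the protection-key bookkeeping, so a child mm lost track of which pkeys were allocated and which key was reserved for execute-only mappings. A minimal userspace sketch of the duplication pattern; mm_ctx and dup_pkeys are illustrative names, not the kernel's:

#include <stdio.h>
#include <stdint.h>

struct mm_ctx {
	uint16_t pkey_allocation_map;	/* bitmap of allocated protection keys */
	int      execute_only_pkey;	/* cached execute-only key, -1 if none */
};

static void dup_pkeys(const struct mm_ctx *oldmm, struct mm_ctx *mm)
{
	/* Copy the parent's pkey state so the child starts identical. */
	mm->pkey_allocation_map = oldmm->pkey_allocation_map;
	mm->execute_only_pkey   = oldmm->execute_only_pkey;
}

int main(void)
{
	struct mm_ctx parent = { .pkey_allocation_map = 0x000f,
				 .execute_only_pkey   = 3 };
	struct mm_ctx child  = { 0 };

	dup_pkeys(&parent, &child);
	printf("child map=%#x xo=%d\n",
	       (unsigned int)child.pkey_allocation_map,
	       child.execute_only_pkey);
	return 0;
}
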
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8f657286d599..0ce558a8150d 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -7,7 +7,11 @@
7#endif 7#endif
8 8
9#ifdef CONFIG_KASAN 9#ifdef CONFIG_KASAN
10#ifdef CONFIG_KASAN_EXTRA
11#define KASAN_STACK_ORDER 2
12#else
10#define KASAN_STACK_ORDER 1 13#define KASAN_STACK_ORDER 1
14#endif
11#else 15#else
12#define KASAN_STACK_ORDER 0 16#define KASAN_STACK_ORDER 0
13#endif 17#endif
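
The page_64_types.h hunk doubles the kernel stack again under KASAN_EXTRA, whose extra instrumentation deepens call frames. On x86-64 the thread stack is PAGE_SIZE << (2 + KASAN_STACK_ORDER), so the three configurations land at 16, 32 and 64 KiB; a standalone sketch of that arithmetic (the formula is taken from the same header):

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

/* Mirrors the #ifdef ladder above: order 0, 1, or 2. */
static int kasan_stack_order(int kasan, int kasan_extra)
{
	if (!kasan)
		return 0;
	return kasan_extra ? 2 : 1;
}

int main(void)
{
	int cfg[3][2] = { { 0, 0 }, { 1, 0 }, { 1, 1 } };
	int i;

	for (i = 0; i < 3; i++) {
		int order = kasan_stack_order(cfg[i][0], cfg[i][1]);

		/* THREAD_SIZE = PAGE_SIZE << (2 + KASAN_STACK_ORDER) */
		printf("KASAN=%d EXTRA=%d -> %d KiB stacks\n",
		       cfg[i][0], cfg[i][1],
		       (1 << (PAGE_SHIFT + 2 + order)) / 1024);
	}
	return 0;
}
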
diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h
index 40ebddde6ac2..f6b7fe2833cc 100644
--- a/arch/x86/include/asm/resctrl_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -2,7 +2,7 @@
2#ifndef _ASM_X86_RESCTRL_SCHED_H 2#ifndef _ASM_X86_RESCTRL_SCHED_H
3#define _ASM_X86_RESCTRL_SCHED_H 3#define _ASM_X86_RESCTRL_SCHED_H
4 4
5#ifdef CONFIG_X86_RESCTRL 5#ifdef CONFIG_X86_CPU_RESCTRL
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/jump_label.h> 8#include <linux/jump_label.h>
@@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void)
88 88
89static inline void resctrl_sched_in(void) {} 89static inline void resctrl_sched_in(void) {}
90 90
91#endif /* CONFIG_X86_RESCTRL */ 91#endif /* CONFIG_X86_CPU_RESCTRL */
92 92
93#endif /* _ASM_X86_RESCTRL_SCHED_H */ 93#endif /* _ASM_X86_RESCTRL_SCHED_H */
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index b6fa0869f7aa..cfd24f9f7614 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
39obj-$(CONFIG_X86_MCE) += mce/ 39obj-$(CONFIG_X86_MCE) += mce/
40obj-$(CONFIG_MTRR) += mtrr/ 40obj-$(CONFIG_MTRR) += mtrr/
41obj-$(CONFIG_MICROCODE) += microcode/ 41obj-$(CONFIG_MICROCODE) += microcode/
42obj-$(CONFIG_X86_RESCTRL) += resctrl/ 42obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/
43 43
44obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 44obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
45 45
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 1de0f4170178..01874d54f4fd 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -71,7 +71,7 @@ void __init check_bugs(void)
71 * identify_boot_cpu() initialized SMT support information, let the 71 * identify_boot_cpu() initialized SMT support information, let the
72 * core code know. 72 * core code know.
73 */ 73 */
74 cpu_smt_check_topology_early(); 74 cpu_smt_check_topology();
75 75
76 if (!IS_ENABLED(CONFIG_SMP)) { 76 if (!IS_ENABLED(CONFIG_SMP)) {
77 pr_info("CPU: "); 77 pr_info("CPU: ");
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 51adde0a0f1a..e1f3ba19ba54 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
855 if (!p) { 855 if (!p) {
856 return ret; 856 return ret;
857 } else { 857 } else {
858 if (boot_cpu_data.microcode == p->patch_id) 858 if (boot_cpu_data.microcode >= p->patch_id)
859 return ret; 859 return ret;
860 860
861 ret = UCODE_NEW; 861 ret = UCODE_NEW;
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
index 1cabe6fd8e11..4a06c37b9cf1 100644
--- a/arch/x86/kernel/cpu/resctrl/Makefile
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -1,4 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o 2obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o
3obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o 3obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o
4CFLAGS_pseudo_lock.o = -I$(src) 4CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c8b07d8ea5a2..17ffc869cab8 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -470,6 +470,7 @@ int crash_load_segments(struct kimage *image)
470 470
471 kbuf.memsz = kbuf.bufsz; 471 kbuf.memsz = kbuf.bufsz;
472 kbuf.buf_align = ELF_CORE_HEADER_ALIGN; 472 kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
473 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
473 ret = kexec_add_buffer(&kbuf); 474 ret = kexec_add_buffer(&kbuf);
474 if (ret) { 475 if (ret) {
475 vfree((void *)image->arch.elf_headers); 476 vfree((void *)image->arch.elf_headers);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index b0acb22e5a46..dfd3aca82c61 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -21,10 +21,6 @@
21 21
22#define HPET_MASK CLOCKSOURCE_MASK(32) 22#define HPET_MASK CLOCKSOURCE_MASK(32)
23 23
24/* FSEC = 10^-15
25 NSEC = 10^-9 */
26#define FSEC_PER_NSEC 1000000L
27
28#define HPET_DEV_USED_BIT 2 24#define HPET_DEV_USED_BIT 2
29#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT) 25#define HPET_DEV_USED (1 << HPET_DEV_USED_BIT)
30#define HPET_DEV_VALID 0x8 26#define HPET_DEV_VALID 0x8
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 278cd07228dd..53917a3ebf94 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
167 struct efi_info *current_ei = &boot_params.efi_info; 167 struct efi_info *current_ei = &boot_params.efi_info;
168 struct efi_info *ei = &params->efi_info; 168 struct efi_info *ei = &params->efi_info;
169 169
170 if (!efi_enabled(EFI_RUNTIME_SERVICES))
171 return 0;
172
170 if (!current_ei->efi_memmap_size) 173 if (!current_ei->efi_memmap_size)
171 return 0; 174 return 0;
172 175
@@ -434,6 +437,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
434 kbuf.memsz = PAGE_ALIGN(header->init_size); 437 kbuf.memsz = PAGE_ALIGN(header->init_size);
435 kbuf.buf_align = header->kernel_alignment; 438 kbuf.buf_align = header->kernel_alignment;
436 kbuf.buf_min = MIN_KERNEL_LOAD_ADDR; 439 kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
440 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
437 ret = kexec_add_buffer(&kbuf); 441 ret = kexec_add_buffer(&kbuf);
438 if (ret) 442 if (ret)
439 goto out_free_params; 443 goto out_free_params;
@@ -448,6 +452,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
448 kbuf.bufsz = kbuf.memsz = initrd_len; 452 kbuf.bufsz = kbuf.memsz = initrd_len;
449 kbuf.buf_align = PAGE_SIZE; 453 kbuf.buf_align = PAGE_SIZE;
450 kbuf.buf_min = MIN_INITRD_LOAD_ADDR; 454 kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
455 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
451 ret = kexec_add_buffer(&kbuf); 456 ret = kexec_add_buffer(&kbuf);
452 if (ret) 457 if (ret)
453 goto out_free_params; 458 goto out_free_params;
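
Both kexec hunks set kbuf.mem explicitly because struct kexec_buf lives on the stack: a stale value in .mem looks like a caller-chosen load address and short-circuits the hole search. A sketch of the sentinel pattern, assuming KEXEC_BUF_MEM_UNKNOWN is the "please search" value (0 here; add_buffer is an illustrative stand-in for kexec_add_buffer):

#include <stdio.h>

#define KEXEC_BUF_MEM_UNKNOWN 0	/* sentinel: "pick an address for me" */

struct kexec_buf {
	unsigned long mem;	/* sentinel = search; else caller-fixed address */
	unsigned long memsz;
};

static int add_buffer(struct kexec_buf *kbuf)
{
	if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN) {
		printf("using caller address %#lx\n", kbuf->mem);
		return 0;
	}
	printf("searching for a hole of %lu bytes\n", kbuf->memsz);
	return 0;
}

int main(void)
{
	struct kexec_buf kbuf;	/* deliberately on the stack, as in the kernel */

	kbuf.memsz = 4096;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;	/* the fix: state intent explicitly */
	return add_buffer(&kbuf);
}
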
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index ba4bfb7f6a36..5c93a65ee1e5 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
457#else 457#else
458 u64 ipi_bitmap = 0; 458 u64 ipi_bitmap = 0;
459#endif 459#endif
460 long ret;
460 461
461 if (cpumask_empty(mask)) 462 if (cpumask_empty(mask))
462 return; 463 return;
@@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
482 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { 483 } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
483 max = apic_id < max ? max : apic_id; 484 max = apic_id < max ? max : apic_id;
484 } else { 485 } else {
485 kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, 486 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
486 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); 487 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
488 WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
487 min = max = apic_id; 489 min = max = apic_id;
488 ipi_bitmap = 0; 490 ipi_bitmap = 0;
489 } 491 }
@@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector)
491 } 493 }
492 494
493 if (ipi_bitmap) { 495 if (ipi_bitmap) {
494 kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, 496 ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
495 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); 497 (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
498 WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
496 } 499 }
497 500
498 local_irq_restore(flags); 501 local_irq_restore(flags);
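
The kvm.c change stops discarding the KVM_HC_SEND_IPI result: a failing hypercall now trips a once-only warning instead of silently dropping IPIs. A runnable userspace approximation of that pattern; the WARN_ONCE macro below mimics, rather than reproduces, the kernel's (GNU C statement expressions assumed), and -95 stands in for a kernel-style error code:

#include <stdio.h>

#define WARN_ONCE(cond, fmt, ...) ({				\
	static int __warned;					\
	if ((cond) && !__warned) {				\
		__warned = 1;					\
		fprintf(stderr, fmt "\n", ##__VA_ARGS__);	\
	}							\
	(cond);							\
})

static long fake_hypercall(void)
{
	return -95;	/* simulated failure */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		long ret = fake_hypercall();

		/* Capture and check instead of ignoring the return value. */
		WARN_ONCE(ret < 0, "failed to send PV IPI: %ld", ret);
	}
	return 0;	/* the warning fires exactly once */
}
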
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e9f777bfed40..3fae23834069 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -297,15 +297,16 @@ static int __init tsc_setup(char *str)
297 297
298__setup("tsc=", tsc_setup); 298__setup("tsc=", tsc_setup);
299 299
300#define MAX_RETRIES 5 300#define MAX_RETRIES 5
301#define SMI_TRESHOLD 50000 301#define TSC_DEFAULT_THRESHOLD 0x20000
302 302
303/* 303/*
304 * Read TSC and the reference counters. Take care of SMI disturbance 304 * Read TSC and the reference counters. Take care of any disturbances
305 */ 305 */
306static u64 tsc_read_refs(u64 *p, int hpet) 306static u64 tsc_read_refs(u64 *p, int hpet)
307{ 307{
308 u64 t1, t2; 308 u64 t1, t2;
309 u64 thresh = tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
309 int i; 310 int i;
310 311
311 for (i = 0; i < MAX_RETRIES; i++) { 312 for (i = 0; i < MAX_RETRIES; i++) {
@@ -315,7 +316,7 @@ static u64 tsc_read_refs(u64 *p, int hpet)
315 else 316 else
316 *p = acpi_pm_read_early(); 317 *p = acpi_pm_read_early();
317 t2 = get_cycles(); 318 t2 = get_cycles();
318 if ((t2 - t1) < SMI_TRESHOLD) 319 if ((t2 - t1) < thresh)
319 return t2; 320 return t2;
320 } 321 }
321 return ULLONG_MAX; 322 return ULLONG_MAX;
@@ -703,15 +704,15 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
703 * zero. In each wait loop iteration we read the TSC and check 704 * zero. In each wait loop iteration we read the TSC and check
704 * the delta to the previous read. We keep track of the min 705 * the delta to the previous read. We keep track of the min
705 * and max values of that delta. The delta is mostly defined 706 * and max values of that delta. The delta is mostly defined
706 * by the IO time of the PIT access, so we can detect when a 707 * by the IO time of the PIT access, so we can detect when
707 * SMI/SMM disturbance happened between the two reads. If the 708 * any disturbance happened between the two reads. If the
708 * maximum time is significantly larger than the minimum time, 709 * maximum time is significantly larger than the minimum time,
709 * then we discard the result and have another try. 710 * then we discard the result and have another try.
710 * 711 *
711 * 2) Reference counter. If available we use the HPET or the 712 * 2) Reference counter. If available we use the HPET or the
712 * PMTIMER as a reference to check the sanity of that value. 713 * PMTIMER as a reference to check the sanity of that value.
713 * We use separate TSC readouts and check inside of the 714 * We use separate TSC readouts and check inside of the
714 * reference read for an SMI/SMM disturbance. We discard 715 * reference read for any possible disturbance. We discard
715 * disturbed values here as well. We do that around the PIT 716 * disturbed values here as well. We do that around the PIT
716 * calibration delay loop as we have to wait for a certain 717 * calibration delay loop as we have to wait for a certain
717 * amount of time anyway. 718 * amount of time anyway.
@@ -744,7 +745,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
744 if (ref1 == ref2) 745 if (ref1 == ref2)
745 continue; 746 continue;
746 747
747 /* Check whether the sampling was disturbed by an SMI */ 748 /* Check whether the sampling was disturbed */
748 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) 749 if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
749 continue; 750 continue;
750 751
@@ -1268,7 +1269,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
1268 */ 1269 */
1269static void tsc_refine_calibration_work(struct work_struct *work) 1270static void tsc_refine_calibration_work(struct work_struct *work)
1270{ 1271{
1271 static u64 tsc_start = -1, ref_start; 1272 static u64 tsc_start = ULLONG_MAX, ref_start;
1272 static int hpet; 1273 static int hpet;
1273 u64 tsc_stop, ref_stop, delta; 1274 u64 tsc_stop, ref_stop, delta;
1274 unsigned long freq; 1275 unsigned long freq;
@@ -1283,14 +1284,15 @@ static void tsc_refine_calibration_work(struct work_struct *work)
1283 * delayed the first time we expire. So set the workqueue 1284 * delayed the first time we expire. So set the workqueue
1284 * again once we know timers are working. 1285 * again once we know timers are working.
1285 */ 1286 */
1286 if (tsc_start == -1) { 1287 if (tsc_start == ULLONG_MAX) {
1288restart:
1287 /* 1289 /*
1288 * Only set hpet once, to avoid mixing hardware 1290 * Only set hpet once, to avoid mixing hardware
1289 * if the hpet becomes enabled later. 1291 * if the hpet becomes enabled later.
1290 */ 1292 */
1291 hpet = is_hpet_enabled(); 1293 hpet = is_hpet_enabled();
1292 schedule_delayed_work(&tsc_irqwork, HZ);
1293 tsc_start = tsc_read_refs(&ref_start, hpet); 1294 tsc_start = tsc_read_refs(&ref_start, hpet);
1295 schedule_delayed_work(&tsc_irqwork, HZ);
1294 return; 1296 return;
1295 } 1297 }
1296 1298
@@ -1300,9 +1302,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
1300 if (ref_start == ref_stop) 1302 if (ref_start == ref_stop)
1301 goto out; 1303 goto out;
1302 1304
1303 /* Check whether the sampling was disturbed by an SMI */ 1305 /* Check whether the sampling was disturbed */
1304 if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX) 1306 if (tsc_stop == ULLONG_MAX)
1305 goto out; 1307 goto restart;
1306 1308
1307 delta = tsc_stop - tsc_start; 1309 delta = tsc_stop - tsc_start;
1308 delta *= 1000000LL; 1310 delta *= 1000000LL;
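
The tsc.c hunk replaces the fixed 50,000-cycle SMI_TRESHOLD with a limit scaled to the calibrated TSC rate, tsc_khz >> 5, roughly 31 µs worth of cycles, falling back to 0x20000 cycles before calibration; it also restarts the refinement work on a disturbed read instead of giving up. A standalone sketch of the threshold arithmetic:

#include <stdio.h>

#define TSC_DEFAULT_THRESHOLD 0x20000

/* tsc_khz cycles elapse per millisecond, so tsc_khz/32 is ~31.25 us. */
static unsigned long long disturbance_thresh(unsigned long tsc_khz)
{
	return tsc_khz ? tsc_khz >> 5 : TSC_DEFAULT_THRESHOLD;
}

int main(void)
{
	printf("uncalibrated: %llu cycles\n", disturbance_thresh(0));
	printf("3 GHz TSC:    %llu cycles (~%llu us)\n",
	       disturbance_thresh(3000000),
	       disturbance_thresh(3000000) / 3000);
	return 0;
}
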
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 69b3a7c30013..31ecf7a76d5a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -2,10 +2,6 @@
2 2
3ccflags-y += -Iarch/x86/kvm 3ccflags-y += -Iarch/x86/kvm
4 4
5CFLAGS_x86.o := -I.
6CFLAGS_svm.o := -I.
7CFLAGS_vmx.o := -I.
8
9KVM := ../../../virt/kvm 5KVM := ../../../virt/kvm
10 6
11kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ 7kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c90a5352d158..89d20ed1d2e8 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1636 ret = kvm_hvcall_signal_event(vcpu, fast, ingpa); 1636 ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
1637 if (ret != HV_STATUS_INVALID_PORT_ID) 1637 if (ret != HV_STATUS_INVALID_PORT_ID)
1638 break; 1638 break;
1639 /* maybe userspace knows this conn_id: fall through */ 1639 /* fall through - maybe userspace knows this conn_id. */
1640 case HVCALL_POST_MESSAGE: 1640 case HVCALL_POST_MESSAGE:
1641 /* don't bother userspace if it has no way to handle it */ 1641 /* don't bother userspace if it has no way to handle it */
1642 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { 1642 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
@@ -1832,7 +1832,6 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1832 ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE; 1832 ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
1833 ent->eax |= HV_X64_MSR_RESET_AVAILABLE; 1833 ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
1834 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; 1834 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
1835 ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
1836 ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS; 1835 ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
1837 ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT; 1836 ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
1838 1837
@@ -1848,11 +1847,11 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1848 case HYPERV_CPUID_ENLIGHTMENT_INFO: 1847 case HYPERV_CPUID_ENLIGHTMENT_INFO:
1849 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; 1848 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
1850 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; 1849 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
1851 ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
1852 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; 1850 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
1853 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; 1851 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
1854 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; 1852 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
1855 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; 1853 if (evmcs_ver)
1854 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
1856 1855
1857 /* 1856 /*
1858 * Default number of spinlock retry attempts, matches 1857 * Default number of spinlock retry attempts, matches
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9f089e2e09d0..4b6c2da7265c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1035 switch (delivery_mode) { 1035 switch (delivery_mode) {
1036 case APIC_DM_LOWEST: 1036 case APIC_DM_LOWEST:
1037 vcpu->arch.apic_arb_prio++; 1037 vcpu->arch.apic_arb_prio++;
1038 /* fall through */
1038 case APIC_DM_FIXED: 1039 case APIC_DM_FIXED:
1039 if (unlikely(trig_mode && !level)) 1040 if (unlikely(trig_mode && !level))
1040 break; 1041 break;
@@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
1874 1875
1875 case APIC_LVT0: 1876 case APIC_LVT0:
1876 apic_manage_nmi_watchdog(apic, val); 1877 apic_manage_nmi_watchdog(apic, val);
1878 /* fall through */
1877 case APIC_LVTTHMR: 1879 case APIC_LVTTHMR:
1878 case APIC_LVTPC: 1880 case APIC_LVTPC:
1879 case APIC_LVT1: 1881 case APIC_LVT1:
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ce770b446238..da9c42349b1f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4371,6 +4371,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4371 rsvd_bits(maxphyaddr, 51); 4371 rsvd_bits(maxphyaddr, 51);
4372 rsvd_check->rsvd_bits_mask[1][4] = 4372 rsvd_check->rsvd_bits_mask[1][4] =
4373 rsvd_check->rsvd_bits_mask[0][4]; 4373 rsvd_check->rsvd_bits_mask[0][4];
4374 /* fall through */
4374 case PT64_ROOT_4LEVEL: 4375 case PT64_ROOT_4LEVEL:
4375 rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | 4376 rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
4376 nonleaf_bit8_rsvd | rsvd_bits(7, 7) | 4377 nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a157ca5b6869..f13a3a24d360 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
3414 kvm_mmu_reset_context(&svm->vcpu); 3414 kvm_mmu_reset_context(&svm->vcpu);
3415 kvm_mmu_load(&svm->vcpu); 3415 kvm_mmu_load(&svm->vcpu);
3416 3416
3417 /*
3418 * Drop what we picked up for L2 via svm_complete_interrupts() so it
3419 * doesn't end up in L1.
3420 */
3421 svm->vcpu.arch.nmi_injected = false;
3422 kvm_clear_exception_queue(&svm->vcpu);
3423 kvm_clear_interrupt_queue(&svm->vcpu);
3424
3417 return 0; 3425 return 0;
3418} 3426}
3419 3427
@@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4395 case MSR_IA32_APICBASE: 4403 case MSR_IA32_APICBASE:
4396 if (kvm_vcpu_apicv_active(vcpu)) 4404 if (kvm_vcpu_apicv_active(vcpu))
4397 avic_update_vapic_bar(to_svm(vcpu), data); 4405 avic_update_vapic_bar(to_svm(vcpu), data);
4398 /* Follow through */ 4406 /* Fall through */
4399 default: 4407 default:
4400 return kvm_set_msr_common(vcpu, msr); 4408 return kvm_set_msr_common(vcpu, msr);
4401 } 4409 }
@@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4504 kvm_lapic_reg_write(apic, APIC_ICR, icrl); 4512 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4505 break; 4513 break;
4506 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: { 4514 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4507 int i;
4508 struct kvm_vcpu *vcpu;
4509 struct kvm *kvm = svm->vcpu.kvm;
4510 struct kvm_lapic *apic = svm->vcpu.arch.apic; 4515 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4511 4516
4512 /* 4517 /*
4513 * At this point, we expect that the AVIC HW has already 4518 * Update ICR high and low, then emulate sending IPI,
4514 * set the appropriate IRR bits on the valid target 4519 * which is handled when writing APIC_ICR.
4515 * vcpus. So, we just need to kick the appropriate vcpu.
4516 */ 4520 */
4517 kvm_for_each_vcpu(i, vcpu, kvm) { 4521 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4518 bool m = kvm_apic_match_dest(vcpu, apic, 4522 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4519 icrl & KVM_APIC_SHORT_MASK,
4520 GET_APIC_DEST_FIELD(icrh),
4521 icrl & KVM_APIC_DEST_MASK);
4522
4523 if (m && !avic_vcpu_is_running(vcpu))
4524 kvm_vcpu_wake_up(vcpu);
4525 }
4526 break; 4523 break;
4527 } 4524 }
4528 case AVIC_IPI_FAILURE_INVALID_TARGET: 4525 case AVIC_IPI_FAILURE_INVALID_TARGET:
4526 WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
4527 index, svm->vcpu.vcpu_id, icrh, icrl);
4529 break; 4528 break;
4530 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: 4529 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4531 WARN_ONCE(1, "Invalid backing page\n"); 4530 WARN_ONCE(1, "Invalid backing page\n");
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 705f40ae2532..6432d08c7de7 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1465,7 +1465,7 @@ TRACE_EVENT(kvm_hv_send_ipi_ex,
1465#endif /* _TRACE_KVM_H */ 1465#endif /* _TRACE_KVM_H */
1466 1466
1467#undef TRACE_INCLUDE_PATH 1467#undef TRACE_INCLUDE_PATH
1468#define TRACE_INCLUDE_PATH arch/x86/kvm 1468#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
1469#undef TRACE_INCLUDE_FILE 1469#undef TRACE_INCLUDE_FILE
1470#define TRACE_INCLUDE_FILE trace 1470#define TRACE_INCLUDE_FILE trace
1471 1471
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index 95bc2247478d..5466c6d85cf3 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -332,16 +332,17 @@ int nested_enable_evmcs(struct kvm_vcpu *vcpu,
332 uint16_t *vmcs_version) 332 uint16_t *vmcs_version)
333{ 333{
334 struct vcpu_vmx *vmx = to_vmx(vcpu); 334 struct vcpu_vmx *vmx = to_vmx(vcpu);
335 bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
336
337 vmx->nested.enlightened_vmcs_enabled = true;
335 338
336 if (vmcs_version) 339 if (vmcs_version)
337 *vmcs_version = nested_get_evmcs_version(vcpu); 340 *vmcs_version = nested_get_evmcs_version(vcpu);
338 341
339 /* We don't support disabling the feature for simplicity. */ 342 /* We don't support disabling the feature for simplicity. */
340 if (vmx->nested.enlightened_vmcs_enabled) 343 if (evmcs_already_enabled)
341 return 0; 344 return 0;
342 345
343 vmx->nested.enlightened_vmcs_enabled = true;
344
345 vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; 346 vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
346 vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; 347 vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
347 vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; 348 vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
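
The evmcs.c reordering records the previous state before setting the flag, so repeated nested_enable_evmcs() calls still report the VMCS version and only the one-time control filtering is skipped. A minimal sketch of that enable-once, report-always shape (enable_feature is an illustrative name):

#include <stdio.h>

static int enabled;

static int enable_feature(unsigned short *version)
{
	int already = enabled;	/* capture old state first */

	enabled = 1;
	if (version)
		*version = 1;	/* reported on every call now */
	if (already)
		return 0;	/* skip only the one-time setup */
	/* ... one-time control setup would go here ... */
	return 0;
}

int main(void)
{
	unsigned short v = 0;

	enable_feature(NULL);
	enable_feature(&v);	/* second call still yields the version */
	printf("version=%u\n", v);
	return 0;
}
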
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 2616bd2c7f2c..d8ea4ebd79e7 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -55,7 +55,7 @@ static u16 shadow_read_write_fields[] = {
55static int max_shadow_read_write_fields = 55static int max_shadow_read_write_fields =
56 ARRAY_SIZE(shadow_read_write_fields); 56 ARRAY_SIZE(shadow_read_write_fields);
57 57
58void init_vmcs_shadow_fields(void) 58static void init_vmcs_shadow_fields(void)
59{ 59{
60 int i, j; 60 int i, j;
61 61
@@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
211 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) 211 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
212 return; 212 return;
213 213
214 hrtimer_cancel(&vmx->nested.preemption_timer);
214 vmx->nested.vmxon = false; 215 vmx->nested.vmxon = false;
215 vmx->nested.smm.vmxon = false; 216 vmx->nested.smm.vmxon = false;
216 free_vpid(vmx->nested.vpid02); 217 free_vpid(vmx->nested.vpid02);
@@ -4140,11 +4141,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4140 if (r < 0) 4141 if (r < 0)
4141 goto out_vmcs02; 4142 goto out_vmcs02;
4142 4143
4143 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); 4144 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
4144 if (!vmx->nested.cached_vmcs12) 4145 if (!vmx->nested.cached_vmcs12)
4145 goto out_cached_vmcs12; 4146 goto out_cached_vmcs12;
4146 4147
4147 vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); 4148 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
4148 if (!vmx->nested.cached_shadow_vmcs12) 4149 if (!vmx->nested.cached_shadow_vmcs12)
4149 goto out_cached_shadow_vmcs12; 4150 goto out_cached_shadow_vmcs12;
4150 4151
@@ -5263,13 +5264,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5263 copy_shadow_to_vmcs12(vmx); 5264 copy_shadow_to_vmcs12(vmx);
5264 } 5265 }
5265 5266
5266 if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) 5267 /*
5268 * Copy over the full allocated size of vmcs12 rather than just the size
5269 * of the struct.
5270 */
5271 if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
5267 return -EFAULT; 5272 return -EFAULT;
5268 5273
5269 if (nested_cpu_has_shadow_vmcs(vmcs12) && 5274 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5270 vmcs12->vmcs_link_pointer != -1ull) { 5275 vmcs12->vmcs_link_pointer != -1ull) {
5271 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, 5276 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
5272 get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) 5277 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
5273 return -EFAULT; 5278 return -EFAULT;
5274 } 5279 }
5275 5280
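
The nested.c change pairs kzalloc() with copies of the full VMCS12_SIZE allocation: the KVM_GET_NESTED_STATE layout stays fixed while the bytes past struct vmcs12 are guaranteed zeros rather than stale heap contents. A userspace sketch of the same pattern, with calloc() standing in for kzalloc() and an illustrative 4096-byte ABI size and abbreviated struct:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define VMCS12_SIZE 4096	/* illustrative ABI size, fixed forever */

struct vmcs12 { unsigned int revision_id; unsigned int abort; };

int main(void)
{
	/* calloc == kzalloc here: padding beyond the struct is zeroed. */
	struct vmcs12 *v = calloc(1, VMCS12_SIZE);
	unsigned char out[VMCS12_SIZE];

	if (!v)
		return 1;
	v->revision_id = 1;

	/* Copy the full allocation, not just sizeof(*v). */
	memcpy(out, v, VMCS12_SIZE);
	printf("tail byte = %u (always 0, never stale heap)\n",
	       out[VMCS12_SIZE - 1]);
	free(v);
	return 0;
}
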
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f6915f10e584..95d618045001 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -26,6 +26,7 @@
26#include <linux/mod_devicetable.h> 26#include <linux/mod_devicetable.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/sched/smt.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/tboot.h> 31#include <linux/tboot.h>
31#include <linux/trace_events.h> 32#include <linux/trace_events.h>
@@ -423,7 +424,7 @@ static void check_ept_pointer_match(struct kvm *kvm)
423 to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; 424 to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
424} 425}
425 426
426int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, 427static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
427 void *data) 428 void *data)
428{ 429{
429 struct kvm_tlb_range *range = data; 430 struct kvm_tlb_range *range = data;
@@ -1773,7 +1774,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1773 if (!msr_info->host_initiated && 1774 if (!msr_info->host_initiated &&
1774 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) 1775 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
1775 return 1; 1776 return 1;
1776 /* Otherwise falls through */ 1777 /* Else, falls through */
1777 default: 1778 default:
1778 msr = find_msr_entry(vmx, msr_info->index); 1779 msr = find_msr_entry(vmx, msr_info->index);
1779 if (msr) { 1780 if (msr) {
@@ -2014,7 +2015,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2014 /* Check reserved bit, higher 32 bits should be zero */ 2015 /* Check reserved bit, higher 32 bits should be zero */
2015 if ((data >> 32) != 0) 2016 if ((data >> 32) != 0)
2016 return 1; 2017 return 1;
2017 /* Otherwise falls through */ 2018 /* Else, falls through */
2018 default: 2019 default:
2019 msr = find_msr_entry(vmx, msr_index); 2020 msr = find_msr_entry(vmx, msr_index);
2020 if (msr) { 2021 if (msr) {
@@ -2344,7 +2345,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
2344 case 37: /* AAT100 */ 2345 case 37: /* AAT100 */
2345 case 44: /* BC86,AAY89,BD102 */ 2346 case 44: /* BC86,AAY89,BD102 */
2346 case 46: /* BA97 */ 2347 case 46: /* BA97 */
2347 _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 2348 _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
2348 _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 2349 _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
2349 pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " 2350 pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2350 "does not work properly. Using workaround\n"); 2351 "does not work properly. Using workaround\n");
@@ -6362,72 +6363,9 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
6362 vmx->loaded_vmcs->hv_timer_armed = false; 6363 vmx->loaded_vmcs->hv_timer_armed = false;
6363} 6364}
6364 6365
6365static void vmx_vcpu_run(struct kvm_vcpu *vcpu) 6366static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
6366{ 6367{
6367 struct vcpu_vmx *vmx = to_vmx(vcpu); 6368 unsigned long evmcs_rsp;
6368 unsigned long cr3, cr4, evmcs_rsp;
6369
6370 /* Record the guest's net vcpu time for enforced NMI injections. */
6371 if (unlikely(!enable_vnmi &&
6372 vmx->loaded_vmcs->soft_vnmi_blocked))
6373 vmx->loaded_vmcs->entry_time = ktime_get();
6374
6375 /* Don't enter VMX if guest state is invalid, let the exit handler
6376 start emulation until we arrive back to a valid state */
6377 if (vmx->emulation_required)
6378 return;
6379
6380 if (vmx->ple_window_dirty) {
6381 vmx->ple_window_dirty = false;
6382 vmcs_write32(PLE_WINDOW, vmx->ple_window);
6383 }
6384
6385 if (vmx->nested.need_vmcs12_sync)
6386 nested_sync_from_vmcs12(vcpu);
6387
6388 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
6389 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
6390 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
6391 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
6392
6393 cr3 = __get_current_cr3_fast();
6394 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
6395 vmcs_writel(HOST_CR3, cr3);
6396 vmx->loaded_vmcs->host_state.cr3 = cr3;
6397 }
6398
6399 cr4 = cr4_read_shadow();
6400 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
6401 vmcs_writel(HOST_CR4, cr4);
6402 vmx->loaded_vmcs->host_state.cr4 = cr4;
6403 }
6404
6405 /* When single-stepping over STI and MOV SS, we must clear the
6406 * corresponding interruptibility bits in the guest state. Otherwise
6407 * vmentry fails as it then expects bit 14 (BS) in pending debug
6408 * exceptions being set, but that's not correct for the guest debugging
6409 * case. */
6410 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6411 vmx_set_interrupt_shadow(vcpu, 0);
6412
6413 if (static_cpu_has(X86_FEATURE_PKU) &&
6414 kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
6415 vcpu->arch.pkru != vmx->host_pkru)
6416 __write_pkru(vcpu->arch.pkru);
6417
6418 pt_guest_enter(vmx);
6419
6420 atomic_switch_perf_msrs(vmx);
6421
6422 vmx_update_hv_timer(vcpu);
6423
6424 /*
6425 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
6426 * it's non-zero. Since vmentry is serialising on affected CPUs, there
6427 * is no need to worry about the conditional branch over the wrmsr
6428 * being speculatively taken.
6429 */
6430 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
6431 6369
6432 vmx->__launched = vmx->loaded_vmcs->launched; 6370 vmx->__launched = vmx->loaded_vmcs->launched;
6433 6371
@@ -6567,6 +6505,77 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
6567 , "eax", "ebx", "edi" 6505 , "eax", "ebx", "edi"
6568#endif 6506#endif
6569 ); 6507 );
6508}
6509STACK_FRAME_NON_STANDARD(__vmx_vcpu_run);
6510
6511static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
6512{
6513 struct vcpu_vmx *vmx = to_vmx(vcpu);
6514 unsigned long cr3, cr4;
6515
6516 /* Record the guest's net vcpu time for enforced NMI injections. */
6517 if (unlikely(!enable_vnmi &&
6518 vmx->loaded_vmcs->soft_vnmi_blocked))
6519 vmx->loaded_vmcs->entry_time = ktime_get();
6520
6521 /* Don't enter VMX if guest state is invalid; let the exit handler
6522 start emulation until we arrive back to a valid state */
6523 if (vmx->emulation_required)
6524 return;
6525
6526 if (vmx->ple_window_dirty) {
6527 vmx->ple_window_dirty = false;
6528 vmcs_write32(PLE_WINDOW, vmx->ple_window);
6529 }
6530
6531 if (vmx->nested.need_vmcs12_sync)
6532 nested_sync_from_vmcs12(vcpu);
6533
6534 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
6535 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
6536 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
6537 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
6538
6539 cr3 = __get_current_cr3_fast();
6540 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
6541 vmcs_writel(HOST_CR3, cr3);
6542 vmx->loaded_vmcs->host_state.cr3 = cr3;
6543 }
6544
6545 cr4 = cr4_read_shadow();
6546 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
6547 vmcs_writel(HOST_CR4, cr4);
6548 vmx->loaded_vmcs->host_state.cr4 = cr4;
6549 }
6550
6551 /* When single-stepping over STI and MOV SS, we must clear the
6552 * corresponding interruptibility bits in the guest state. Otherwise
6553 * vmentry fails as it then expects bit 14 (BS) in pending debug
6554 * exceptions being set, but that's not correct for the guest debugging
6555 * case. */
6556 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6557 vmx_set_interrupt_shadow(vcpu, 0);
6558
6559 if (static_cpu_has(X86_FEATURE_PKU) &&
6560 kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
6561 vcpu->arch.pkru != vmx->host_pkru)
6562 __write_pkru(vcpu->arch.pkru);
6563
6564 pt_guest_enter(vmx);
6565
6566 atomic_switch_perf_msrs(vmx);
6567
6568 vmx_update_hv_timer(vcpu);
6569
6570 /*
6571 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
6572 * it's non-zero. Since vmentry is serialising on affected CPUs, there
6573 * is no need to worry about the conditional branch over the wrmsr
6574 * being speculatively taken.
6575 */
6576 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
6577
6578 __vmx_vcpu_run(vcpu, vmx);
6570 6579
6571 /* 6580 /*
6572 * We do not use IBRS in the kernel. If this vCPU has used the 6581 * We do not use IBRS in the kernel. If this vCPU has used the
@@ -6648,7 +6657,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
6648 vmx_recover_nmi_blocking(vmx); 6657 vmx_recover_nmi_blocking(vmx);
6649 vmx_complete_interrupts(vmx); 6658 vmx_complete_interrupts(vmx);
6650} 6659}
6651STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
6652 6660
6653static struct kvm *vmx_vm_alloc(void) 6661static struct kvm *vmx_vm_alloc(void)
6654{ 6662{
@@ -6816,7 +6824,7 @@ static int vmx_vm_init(struct kvm *kvm)
6816 * Warn upon starting the first VM in a potentially 6824 * Warn upon starting the first VM in a potentially
6817 * insecure environment. 6825 * insecure environment.
6818 */ 6826 */
6819 if (cpu_smt_control == CPU_SMT_ENABLED) 6827 if (sched_smt_active())
6820 pr_warn_once(L1TF_MSG_SMT); 6828 pr_warn_once(L1TF_MSG_SMT);
6821 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) 6829 if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
6822 pr_warn_once(L1TF_MSG_L1D); 6830 pr_warn_once(L1TF_MSG_L1D);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02c8e095a239..e67ecf25e690 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3834 case KVM_CAP_HYPERV_SYNIC2: 3834 case KVM_CAP_HYPERV_SYNIC2:
3835 if (cap->args[0]) 3835 if (cap->args[0])
3836 return -EINVAL; 3836 return -EINVAL;
3837 /* fall through */
3838
3837 case KVM_CAP_HYPERV_SYNIC: 3839 case KVM_CAP_HYPERV_SYNIC:
3838 if (!irqchip_in_kernel(vcpu->kvm)) 3840 if (!irqchip_in_kernel(vcpu->kvm))
3839 return -EINVAL; 3841 return -EINVAL;
@@ -5114,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
5114{ 5116{
5115 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 5117 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
5116 5118
5119 /*
5120 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
5121 * is returned, but our callers are not ready for that and they blindly
5122 * call kvm_inject_page_fault. Ensure that they at least do not leak
5123 * uninitialized kernel stack memory into cr2 and error code.
5124 */
5125 memset(exception, 0, sizeof(*exception));
5117 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 5126 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
5118 exception); 5127 exception);
5119} 5128}
@@ -6480,8 +6489,7 @@ restart:
6480 toggle_interruptibility(vcpu, ctxt->interruptibility); 6489 toggle_interruptibility(vcpu, ctxt->interruptibility);
6481 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6490 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6482 kvm_rip_write(vcpu, ctxt->eip); 6491 kvm_rip_write(vcpu, ctxt->eip);
6483 if (r == EMULATE_DONE && 6492 if (r == EMULATE_DONE && ctxt->tf)
6484 (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
6485 kvm_vcpu_do_singlestep(vcpu, &r); 6493 kvm_vcpu_do_singlestep(vcpu, &r);
6486 if (!ctxt->have_exception || 6494 if (!ctxt->have_exception ||
6487 exception_type(ctxt->exception.vector) == EXCPT_TRAP) 6495 exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -7093,10 +7101,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
7093 case KVM_HC_CLOCK_PAIRING: 7101 case KVM_HC_CLOCK_PAIRING:
7094 ret = kvm_pv_clock_pairing(vcpu, a0, a1); 7102 ret = kvm_pv_clock_pairing(vcpu, a0, a1);
7095 break; 7103 break;
7104#endif
7096 case KVM_HC_SEND_IPI: 7105 case KVM_HC_SEND_IPI:
7097 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); 7106 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
7098 break; 7107 break;
7099#endif
7100 default: 7108 default:
7101 ret = -KVM_ENOSYS; 7109 ret = -KVM_ENOSYS;
7102 break; 7110 break;
@@ -7937,6 +7945,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
7937 vcpu->arch.pv.pv_unhalted = false; 7945 vcpu->arch.pv.pv_unhalted = false;
7938 vcpu->arch.mp_state = 7946 vcpu->arch.mp_state =
7939 KVM_MP_STATE_RUNNABLE; 7947 KVM_MP_STATE_RUNNABLE;
7948 /* fall through */
7940 case KVM_MP_STATE_RUNNABLE: 7949 case KVM_MP_STATE_RUNNABLE:
7941 vcpu->arch.apf.halted = false; 7950 vcpu->arch.apf.halted = false;
7942 break; 7951 break;
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 66894675f3c8..df50451d94ef 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -2,8 +2,11 @@
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/io.h> 3#include <linux/io.h>
4 4
5#define movs(type,to,from) \
6 asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
7
5/* Originally from i386/string.h */ 8/* Originally from i386/string.h */
6static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) 9static __always_inline void rep_movs(void *to, const void *from, size_t n)
7{ 10{
8 unsigned long d0, d1, d2; 11 unsigned long d0, d1, d2;
9 asm volatile("rep ; movsl\n\t" 12 asm volatile("rep ; movsl\n\t"
@@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n)
21 24
22void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) 25void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
23{ 26{
24 __iomem_memcpy(to, (const void *)from, n); 27 if (unlikely(!n))
28 return;
29
30 /* Align any unaligned source IO */
31 if (unlikely(1 & (unsigned long)from)) {
32 movs("b", to, from);
33 n--;
34 }
35 if (n > 1 && unlikely(2 & (unsigned long)from)) {
36 movs("w", to, from);
37 n -= 2;
38 }
39 rep_movs(to, (const void *)from, n);
25} 40}
26EXPORT_SYMBOL(memcpy_fromio); 41EXPORT_SYMBOL(memcpy_fromio);
27 42
28void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) 43void memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
29{ 44{
30 __iomem_memcpy((void *)to, (const void *) from, n); 45 if (unlikely(!n))
46 return;
47
48 /* Align any unaligned destination IO */
49 if (unlikely(1 & (unsigned long)to)) {
50 movs("b", to, from);
51 n--;
52 }
53 if (n > 1 && unlikely(2 & (unsigned long)to)) {
54 movs("w", to, from);
55 n -= 2;
56 }
57 rep_movs((void *)to, (const void *) from, n);
31} 58}
32EXPORT_SYMBOL(memcpy_toio); 59EXPORT_SYMBOL(memcpy_toio);
33 60
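
The iomem.c rework peels off a leading byte and/or 16-bit word until the IO-side pointer is 4-byte aligned before handing the rest to the rep-movs bulk copy, which matters on devices where misaligned MMIO accesses fault or misbehave. A hedged sketch of the head-alignment logic, with plain memcpy standing in for the movs()/rep_movs() primitives:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void copy_from_io(void *to, const void *from, size_t n)
{
	const uint8_t *s = from;
	uint8_t *d = to;

	if (n && ((uintptr_t)s & 1)) {		/* odd source: peel 1 byte */
		*d++ = *s++;
		n--;
	}
	if (n > 1 && ((uintptr_t)s & 2)) {	/* 2-aligned: peel 1 word */
		memcpy(d, s, 2);
		d += 2; s += 2;
		n -= 2;
	}
	memcpy(d, s, n);			/* bulk copy, now 4-byte aligned */
}

int main(void)
{
	char src[16] = "0123456789abcdef", dst[16];

	copy_from_io(dst, src + 1, 10);		/* deliberately misaligned */
	printf("%.10s\n", dst);			/* prints 123456789a */
	return 0;
}

Only the IO side is aligned, matching the hunk: the memory side tolerates misalignment, the device side may not.
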
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 79778ab200e4..a53665116458 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
36 u16 status, timer; 36 u16 status, timer;
37 37
38 do { 38 do {
39 outb(I8254_PORT_CONTROL, 39 outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
40 I8254_CMD_READBACK | I8254_SELECT_COUNTER0); 40 I8254_PORT_CONTROL);
41 status = inb(I8254_PORT_COUNTER0); 41 status = inb(I8254_PORT_COUNTER0);
42 timer = inb(I8254_PORT_COUNTER0); 42 timer = inb(I8254_PORT_COUNTER0);
43 timer |= inb(I8254_PORT_COUNTER0) << 8; 43 timer |= inb(I8254_PORT_COUNTER0) << 8;
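
The kaslr.c fix is a plain argument-order bug: Linux's x86 outb() takes (value, port), and since both parameters are small integers the swapped call compiled silently while sending the 0x43 port number out as a command byte. A mocked, runnable illustration; the stub outb() only prints, since real port IO needs ioperm() or ring 0:

#include <stdio.h>

#define I8254_PORT_CONTROL	0x43
#define I8254_CMD_READBACK	0xc0
#define I8254_SELECT_COUNTER0	0x02

/* Mock with Linux's x86 ordering: value first, port second. */
static void outb(unsigned char value, unsigned short port)
{
	printf("write 0x%02x -> port 0x%02x\n",
	       (unsigned int)value, (unsigned int)port);
}

int main(void)
{
	/* Broken: the port number goes out as data, the command as port. */
	outb(I8254_PORT_CONTROL, I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
	/* Fixed: the readback command byte reaches the 0x43 control port. */
	outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, I8254_PORT_CONTROL);
	return 0;
}
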
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2ff25ad33233..9d5c75f02295 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
595 return; 595 return;
596 } 596 }
597 597
598 addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24); 598 addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24);
599#ifdef CONFIG_X86_64 599#ifdef CONFIG_X86_64
600 addr |= ((u64)desc.base3 << 32); 600 addr |= ((u64)desc.base3 << 32);
601#endif 601#endif
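
The fault.c cast defends against integer promotion: desc.base2 is an 8-bit field, so base2 << 24 is evaluated as a signed int, and a value with bit 7 set lands in the sign bit and is then sign-extended when widened into the unsigned long address (formally that shift is undefined; in practice it sign-extends). A standalone demonstration on an LP64 machine:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t base2 = 0x80;

	unsigned long bad  = (base2 << 24);		   /* int result, sign-extended */
	unsigned long good = ((unsigned long)base2 << 24); /* widened before shifting */

	printf("bad  = %#lx\n", bad);	/* 0xffffffff80000000 on LP64 */
	printf("good = %#lx\n", good);	/* 0x80000000 */
	return 0;
}
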
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index a19ef1a416ff..4aa9b1480866 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -158,8 +158,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
158 pmd = pmd_offset(pud, ppd->vaddr); 158 pmd = pmd_offset(pud, ppd->vaddr);
159 if (pmd_none(*pmd)) { 159 if (pmd_none(*pmd)) {
160 pte = ppd->pgtable_area; 160 pte = ppd->pgtable_area;
161 memset(pte, 0, sizeof(pte) * PTRS_PER_PTE); 161 memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
162 ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE; 162 ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
163 set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte))); 163 set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
164 } 164 }
165 165
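
The mem_encrypt_identity.c fix swaps sizeof(pte), the pointer, for sizeof(*pte), the entry. On x86-64 both happen to be 8 bytes, so the old memset length was correct only by accident; the sketch below shows why the spelling matters wherever pointer and entry sizes diverge (pteval_t and PTRS_PER_PTE are stand-ins):

#include <stdio.h>

typedef unsigned long pteval_t;
typedef struct { pteval_t pte; } pte_t;	/* one page-table entry */

#define PTRS_PER_PTE 512

int main(void)
{
	pte_t *pte = NULL;

	printf("sizeof(pte)  = %zu (the pointer)\n", sizeof(pte));
	printf("sizeof(*pte) = %zu (the entry)\n", sizeof(*pte));
	printf("page-table page = %zu bytes\n",
	       sizeof(*pte) * PTRS_PER_PTE);	/* the intended clear size */
	return 0;
}
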
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 20a0756f27ef..ce91682770cb 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -164,7 +164,7 @@ config XTENSA_FAKE_NMI
164 If unsure, say N. 164 If unsure, say N.
165 165
166config XTENSA_UNALIGNED_USER 166config XTENSA_UNALIGNED_USER
167 bool "Unaligned memory access in use space" 167 bool "Unaligned memory access in user space"
168 help 168 help
169 The Xtensa architecture currently does not handle unaligned 169 The Xtensa architecture currently does not handle unaligned
170 memory accesses in hardware but through an exception handler. 170 memory accesses in hardware but through an exception handler.
@@ -451,7 +451,7 @@ config USE_OF
451 help 451 help
452 Include support for flattened device tree machine descriptions. 452 Include support for flattened device tree machine descriptions.
453 453
454config BUILTIN_DTB 454config BUILTIN_DTB_SOURCE
455 string "DTB to build into the kernel image" 455 string "DTB to build into the kernel image"
456 depends on OF 456 depends on OF
457 457
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
index f8052ba5aea8..0b8d00cdae7c 100644
--- a/arch/xtensa/boot/dts/Makefile
+++ b/arch/xtensa/boot/dts/Makefile
@@ -7,9 +7,9 @@
7# 7#
8# 8#
9 9
10BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o 10BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
11ifneq ($(CONFIG_BUILTIN_DTB),"") 11ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
12obj-$(CONFIG_OF) += $(BUILTIN_DTB) 12obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE)
13endif 13endif
14 14
15# for CONFIG_OF_ALL_DTBS test 15# for CONFIG_OF_ALL_DTBS test
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index 2bf964df37ba..f378e56f9ce6 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -34,7 +34,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
34CONFIG_CMDLINE_BOOL=y 34CONFIG_CMDLINE_BOOL=y
35CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" 35CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
36CONFIG_USE_OF=y 36CONFIG_USE_OF=y
37CONFIG_BUILTIN_DTB="kc705" 37CONFIG_BUILTIN_DTB_SOURCE="kc705"
38# CONFIG_COMPACTION is not set 38# CONFIG_COMPACTION is not set
39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
40CONFIG_PM=y 40CONFIG_PM=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index 3221b7053fa3..62f32a902568 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -38,7 +38,7 @@ CONFIG_HIGHMEM=y
38# CONFIG_PCI is not set 38# CONFIG_PCI is not set
39CONFIG_XTENSA_PLATFORM_XTFPGA=y 39CONFIG_XTENSA_PLATFORM_XTFPGA=y
40CONFIG_USE_OF=y 40CONFIG_USE_OF=y
41CONFIG_BUILTIN_DTB="csp" 41CONFIG_BUILTIN_DTB_SOURCE="csp"
42# CONFIG_COMPACTION is not set 42# CONFIG_COMPACTION is not set
43CONFIG_XTFPGA_LCD=y 43CONFIG_XTFPGA_LCD=y
44# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 44# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 985fa8546e4e..8bebe07f1060 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
33CONFIG_CMDLINE_BOOL=y 33CONFIG_CMDLINE_BOOL=y
34CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" 34CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0"
35CONFIG_USE_OF=y 35CONFIG_USE_OF=y
36CONFIG_BUILTIN_DTB="kc705" 36CONFIG_BUILTIN_DTB_SOURCE="kc705"
37# CONFIG_COMPACTION is not set 37# CONFIG_COMPACTION is not set
38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 38# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
39CONFIG_NET=y 39CONFIG_NET=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index f3fc4f970ca8..933ab2adf434 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -39,7 +39,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y
39CONFIG_CMDLINE_BOOL=y 39CONFIG_CMDLINE_BOOL=y
40CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000" 40CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
41CONFIG_USE_OF=y 41CONFIG_USE_OF=y
42CONFIG_BUILTIN_DTB="kc705_nommu" 42CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu"
43CONFIG_BINFMT_FLAT=y 43CONFIG_BINFMT_FLAT=y
44CONFIG_NET=y 44CONFIG_NET=y
45CONFIG_PACKET=y 45CONFIG_PACKET=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 11fed6c06a7c..e29c5b179a5b 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -33,11 +33,12 @@ CONFIG_SMP=y
33CONFIG_HOTPLUG_CPU=y 33CONFIG_HOTPLUG_CPU=y
34# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set 34# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
35# CONFIG_PCI is not set 35# CONFIG_PCI is not set
36CONFIG_VECTORS_OFFSET=0x00002000
36CONFIG_XTENSA_PLATFORM_XTFPGA=y 37CONFIG_XTENSA_PLATFORM_XTFPGA=y
37CONFIG_CMDLINE_BOOL=y 38CONFIG_CMDLINE_BOOL=y
38CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0" 39CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
39CONFIG_USE_OF=y 40CONFIG_USE_OF=y
40CONFIG_BUILTIN_DTB="lx200mx" 41CONFIG_BUILTIN_DTB_SOURCE="lx200mx"
41# CONFIG_COMPACTION is not set 42# CONFIG_COMPACTION is not set
42# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 43# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
43CONFIG_NET=y 44CONFIG_NET=y
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index da08e75100ab..7f009719304e 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -276,12 +276,13 @@ should_never_return:
276 276
277 movi a2, cpu_start_ccount 277 movi a2, cpu_start_ccount
2781: 2781:
279 memw
279 l32i a3, a2, 0 280 l32i a3, a2, 0
280 beqi a3, 0, 1b 281 beqi a3, 0, 1b
281 movi a3, 0 282 movi a3, 0
282 s32i a3, a2, 0 283 s32i a3, a2, 0
283 memw
2841: 2841:
285 memw
285 l32i a3, a2, 0 286 l32i a3, a2, 0
286 beqi a3, 0, 1b 287 beqi a3, 0, 1b
287 wsr a3, ccount 288 wsr a3, ccount
@@ -317,11 +318,13 @@ ENTRY(cpu_restart)
317 rsr a0, prid 318 rsr a0, prid
318 neg a2, a0 319 neg a2, a0
319 movi a3, cpu_start_id 320 movi a3, cpu_start_id
321 memw
320 s32i a2, a3, 0 322 s32i a2, a3, 0
321#if XCHAL_DCACHE_IS_WRITEBACK 323#if XCHAL_DCACHE_IS_WRITEBACK
322 dhwbi a3, 0 324 dhwbi a3, 0
323#endif 325#endif
3241: 3261:
327 memw
325 l32i a2, a3, 0 328 l32i a2, a3, 0
326 dhi a3, 0 329 dhi a3, 0
327 bne a2, a0, 1b 330 bne a2, a0, 1b
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 932d64689bac..be1f280c322c 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
83{ 83{
84 unsigned i; 84 unsigned i;
85 85
86 for (i = 0; i < max_cpus; ++i) 86 for_each_possible_cpu(i)
87 set_cpu_present(i, true); 87 set_cpu_present(i, true);
88} 88}
89 89
@@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
96 pr_info("%s: Core Count = %d\n", __func__, ncpus); 96 pr_info("%s: Core Count = %d\n", __func__, ncpus);
97 pr_info("%s: Core Id = %d\n", __func__, core_id); 97 pr_info("%s: Core Id = %d\n", __func__, core_id);
98 98
99 if (ncpus > NR_CPUS) {
100 ncpus = NR_CPUS;
101 pr_info("%s: limiting core count to %d\n", __func__, ncpus);
102 }
103
99 for (i = 0; i < ncpus; ++i) 104 for (i = 0; i < ncpus; ++i)
100 set_cpu_possible(i, true); 105 set_cpu_possible(i, true);
101} 106}
@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
195 int i; 200 int i;
196 201
197#ifdef CONFIG_HOTPLUG_CPU 202#ifdef CONFIG_HOTPLUG_CPU
198 cpu_start_id = cpu; 203 WRITE_ONCE(cpu_start_id, cpu);
199 system_flush_invalidate_dcache_range( 204 /* Pairs with the third memw in cpu_restart */
200 (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); 205 mb();
206 system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
207 sizeof(cpu_start_id));
201#endif 208#endif
202 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); 209 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
203 210
@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
206 ccount = get_ccount(); 213 ccount = get_ccount();
207 while (!ccount); 214 while (!ccount);
208 215
209 cpu_start_ccount = ccount; 216 WRITE_ONCE(cpu_start_ccount, ccount);
210 217
211 while (time_before(jiffies, timeout)) { 218 do {
219 /*
220 * Pairs with the first two memws in the
221 * .Lboot_secondary.
222 */
212 mb(); 223 mb();
213 if (!cpu_start_ccount) 224 ccount = READ_ONCE(cpu_start_ccount);
214 break; 225 } while (ccount && time_before(jiffies, timeout));
215 }
216 226
217 if (cpu_start_ccount) { 227 if (ccount) {
218 smp_call_function_single(0, mx_cpu_stop, 228 smp_call_function_single(0, mx_cpu_stop,
219 (void *)cpu, 1); 229 (void *)cpu, 1);
220 cpu_start_ccount = 0; 230 WRITE_ONCE(cpu_start_ccount, 0);
221 return -EIO; 231 return -EIO;
222 } 232 }
223 } 233 }
@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
237 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", 247 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
238 __func__, cpu, idle, start_info.stack); 248 __func__, cpu, idle, start_info.stack);
239 249
250 init_completion(&cpu_running);
240 ret = boot_secondary(cpu, idle); 251 ret = boot_secondary(cpu, idle);
241 if (ret == 0) { 252 if (ret == 0) {
242 wait_for_completion_timeout(&cpu_running, 253 wait_for_completion_timeout(&cpu_running,
@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
298 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 309 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
299 while (time_before(jiffies, timeout)) { 310 while (time_before(jiffies, timeout)) {
300 system_invalidate_dcache_range((unsigned long)&cpu_start_id, 311 system_invalidate_dcache_range((unsigned long)&cpu_start_id,
301 sizeof(cpu_start_id)); 312 sizeof(cpu_start_id));
302		if (cpu_start_id == -cpu) {				 313		/* Pairs with the second memw in cpu_restart */
314 mb();
315 if (READ_ONCE(cpu_start_id) == -cpu) {
303 platform_cpu_kill(cpu); 316 platform_cpu_kill(cpu);
304 return; 317 return;
305 } 318 }
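
The smp.c changes pair each mb() with a memw issued by the head.S hunks above, and convert the plain loads and stores of cpu_start_id and cpu_start_ccount to READ_ONCE()/WRITE_ONCE() so the compiler can neither tear nor cache them across the handshake. A minimal sketch of the pattern, assuming kernel context (the variable is an illustrative stand-in):

	static int handshake_word;	/* stands in for cpu_start_ccount */

	static void signal_secondary(int value)
	{
		WRITE_ONCE(handshake_word, value);	/* no store tearing */
		mb();		/* pairs with the barrier on the reader side */
	}

	static int wait_for_secondary(void)
	{
		int v;

		do {
			mb();	/* pairs with the writer's barrier */
			v = READ_ONCE(handshake_word);
		} while (!v);
		return v;
	}
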
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index fd524a54d2ab..378186b5eb40 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
89 container_of(evt, struct ccount_timer, evt); 89 container_of(evt, struct ccount_timer, evt);
90 90
91 if (timer->irq_enabled) { 91 if (timer->irq_enabled) {
92 disable_irq(evt->irq); 92 disable_irq_nosync(evt->irq);
93 timer->irq_enabled = 0; 93 timer->irq_enabled = 0;
94 } 94 }
95 return 0; 95 return 0;
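
disable_irq() waits for any in-flight handler of that interrupt to finish, so calling it from a path that can itself run off the timer interrupt (as a clockevent shutdown can) risks deadlock; disable_irq_nosync() only masks the line and returns immediately. Schematically, assuming kernel context:

	disable_irq_nosync(irq);	/* safe in hard-irq context: masks only */
	disable_irq(irq);		/* sleeps until running handlers finish;
					 * must not be called from that IRQ's own path */
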
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c5f61ceeb67..6b78ec56a4f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
462 kblockd_schedule_work(&q->timeout_work); 462 kblockd_schedule_work(&q->timeout_work);
463} 463}
464 464
465static void blk_timeout_work(struct work_struct *work)
466{
467}
468
465/** 469/**
466 * blk_alloc_queue_node - allocate a request queue 470 * blk_alloc_queue_node - allocate a request queue
467 * @gfp_mask: memory allocation flags 471 * @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
505 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, 509 timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
506 laptop_mode_timer_fn, 0); 510 laptop_mode_timer_fn, 0);
507 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); 511 timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
508 INIT_WORK(&q->timeout_work, NULL); 512 INIT_WORK(&q->timeout_work, blk_timeout_work);
509 INIT_LIST_HEAD(&q->icq_list); 513 INIT_LIST_HEAD(&q->icq_list);
510#ifdef CONFIG_BLK_CGROUP 514#ifdef CONFIG_BLK_CGROUP
511 INIT_LIST_HEAD(&q->blkg_list); 515 INIT_LIST_HEAD(&q->blkg_list);
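
INIT_WORK(&q->timeout_work, NULL) left a NULL function pointer that the workqueue would call through if blk_rq_timed_out_timer() ever queued the work on a legacy queue; the empty blk_timeout_work() stub makes queuing harmless. The hazard, sketched in kernel-style C with illustrative names:

	static struct work_struct wk;

	static void noop_work_fn(struct work_struct *work)
	{
		/* intentionally empty: a safe target for the workqueue */
	}

	static void setup(void)
	{
		INIT_WORK(&wk, noop_work_fn);	/* a NULL fn here would be
						 * jumped through when the
						 * work item runs */
		schedule_work(&wk);
	}
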
diff --git a/block/blk-flush.c b/block/blk-flush.c
index a3fc7191c694..6e0f2d97fc6d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
335 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error); 335 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
336 spin_unlock_irqrestore(&fq->mq_flush_lock, flags); 336 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
337 337
338 blk_mq_run_hw_queue(hctx, true); 338 blk_mq_sched_restart(hctx);
339} 339}
340 340
341/** 341/**
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 90d68760af08..f8120832ca7b 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -308,8 +308,9 @@ static const char *const cmd_flag_name[] = {
308 CMD_FLAG_NAME(PREFLUSH), 308 CMD_FLAG_NAME(PREFLUSH),
309 CMD_FLAG_NAME(RAHEAD), 309 CMD_FLAG_NAME(RAHEAD),
310 CMD_FLAG_NAME(BACKGROUND), 310 CMD_FLAG_NAME(BACKGROUND),
311 CMD_FLAG_NAME(NOUNMAP),
312 CMD_FLAG_NAME(NOWAIT), 311 CMD_FLAG_NAME(NOWAIT),
312 CMD_FLAG_NAME(NOUNMAP),
313 CMD_FLAG_NAME(HIPRI),
313}; 314};
314#undef CMD_FLAG_NAME 315#undef CMD_FLAG_NAME
315 316
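
cmd_flag_name[] is indexed by __REQ_* bit position when debugfs decodes rq->cmd_flags, so its entries must appear in exactly the enum's bit order; this hunk moves NOUNMAP after NOWAIT to match and adds the missing HIPRI entry. A simplified, self-contained sketch of why the ordering matters (names are illustrative):

	#include <stdio.h>

	/* Must mirror the flag enum's bit order exactly. */
	static const char *const flag_name[] = { "SYNC", "META", "PRIO" };

	static void dump_flags(unsigned int flags)
	{
		for (unsigned int bit = 0; bit < 3; bit++)
			if (flags & (1u << bit))
				printf("%s ", flag_name[bit]); /* name picked by bit index */
	}

	int main(void)
	{
		dump_flags(0x5);	/* prints "SYNC PRIO " */
		return 0;
	}
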
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index f0c56649775f..fd166fbb0f65 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -597,7 +597,7 @@ static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
597 rq->wbt_flags |= bio_to_wbt_flags(rwb, bio); 597 rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
598} 598}
599 599
600void wbt_issue(struct rq_qos *rqos, struct request *rq) 600static void wbt_issue(struct rq_qos *rqos, struct request *rq)
601{ 601{
602 struct rq_wb *rwb = RQWB(rqos); 602 struct rq_wb *rwb = RQWB(rqos);
603 603
@@ -617,7 +617,7 @@ void wbt_issue(struct rq_qos *rqos, struct request *rq)
617 } 617 }
618} 618}
619 619
620void wbt_requeue(struct rq_qos *rqos, struct request *rq) 620static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
621{ 621{
622 struct rq_wb *rwb = RQWB(rqos); 622 struct rq_wb *rwb = RQWB(rqos);
623 if (!rwb_enabled(rwb)) 623 if (!rwb_enabled(rwb))
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 5143e11e3b0f..e18ade5d74e9 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -409,6 +409,32 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
409 return true; 409 return true;
410} 410}
411 411
412static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
413 struct nd_cmd_pkg *call_pkg)
414{
415 if (call_pkg) {
416 int i;
417
418 if (nfit_mem->family != call_pkg->nd_family)
419 return -ENOTTY;
420
421 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
422 if (call_pkg->nd_reserved2[i])
423 return -EINVAL;
424 return call_pkg->nd_command;
425 }
426
427 /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
428 if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
429 return cmd;
430
431 /*
432 * Force function number validation to fail since 0 is never
433 * published as a valid function in dsm_mask.
434 */
435 return 0;
436}
437
412int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, 438int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
413 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) 439 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
414{ 440{
@@ -422,30 +448,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
422 unsigned long cmd_mask, dsm_mask; 448 unsigned long cmd_mask, dsm_mask;
423 u32 offset, fw_status = 0; 449 u32 offset, fw_status = 0;
424 acpi_handle handle; 450 acpi_handle handle;
425 unsigned int func;
426 const guid_t *guid; 451 const guid_t *guid;
427 int rc, i; 452 int func, rc, i;
428 453
429 if (cmd_rc) 454 if (cmd_rc)
430 *cmd_rc = -EINVAL; 455 *cmd_rc = -EINVAL;
431 func = cmd;
432 if (cmd == ND_CMD_CALL) {
433 call_pkg = buf;
434 func = call_pkg->nd_command;
435
436 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
437 if (call_pkg->nd_reserved2[i])
438 return -EINVAL;
439 }
440 456
441 if (nvdimm) { 457 if (nvdimm) {
442 struct acpi_device *adev = nfit_mem->adev; 458 struct acpi_device *adev = nfit_mem->adev;
443 459
444 if (!adev) 460 if (!adev)
445 return -ENOTTY; 461 return -ENOTTY;
446 if (call_pkg && nfit_mem->family != call_pkg->nd_family)
447 return -ENOTTY;
448 462
463 if (cmd == ND_CMD_CALL)
464 call_pkg = buf;
465 func = cmd_to_func(nfit_mem, cmd, call_pkg);
466 if (func < 0)
467 return func;
449 dimm_name = nvdimm_name(nvdimm); 468 dimm_name = nvdimm_name(nvdimm);
450 cmd_name = nvdimm_cmd_name(cmd); 469 cmd_name = nvdimm_cmd_name(cmd);
451 cmd_mask = nvdimm_cmd_mask(nvdimm); 470 cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -456,6 +475,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
456 } else { 475 } else {
457 struct acpi_device *adev = to_acpi_dev(acpi_desc); 476 struct acpi_device *adev = to_acpi_dev(acpi_desc);
458 477
478 func = cmd;
459 cmd_name = nvdimm_bus_cmd_name(cmd); 479 cmd_name = nvdimm_bus_cmd_name(cmd);
460 cmd_mask = nd_desc->cmd_mask; 480 cmd_mask = nd_desc->cmd_mask;
461 dsm_mask = cmd_mask; 481 dsm_mask = cmd_mask;
@@ -470,7 +490,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
470 if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) 490 if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
471 return -ENOTTY; 491 return -ENOTTY;
472 492
473 if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) 493 /*
494 * Check for a valid command. For ND_CMD_CALL, we also have to
495 * make sure that the DSM function is supported.
496 */
497 if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
498 return -ENOTTY;
499 else if (!test_bit(cmd, &cmd_mask))
474 return -ENOTTY; 500 return -ENOTTY;
475 501
476 in_obj.type = ACPI_TYPE_PACKAGE; 502 in_obj.type = ACPI_TYPE_PACKAGE;
@@ -1867,6 +1893,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1867 return 0; 1893 return 0;
1868 } 1894 }
1869 1895
1896 /*
1897 * Function 0 is the command interrogation function, don't
1898 * export it to potential userspace use, and enable it to be
1899 * used as an error value in acpi_nfit_ctl().
1900 */
1901 dsm_mask &= ~1UL;
1902
1870 guid = to_nfit_uuid(nfit_mem->family); 1903 guid = to_nfit_uuid(nfit_mem->family);
1871 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) 1904 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1872 if (acpi_check_dsm(adev_dimm->handle, guid, 1905 if (acpi_check_dsm(adev_dimm->handle, guid,
@@ -2042,11 +2075,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
2042 if (!nvdimm) 2075 if (!nvdimm)
2043 continue; 2076 continue;
2044 2077
2045 rc = nvdimm_security_setup_events(nvdimm);
2046 if (rc < 0)
2047 dev_warn(acpi_desc->dev,
2048 "security event setup failed: %d\n", rc);
2049
2050 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit"); 2078 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
2051 if (nfit_kernfs) 2079 if (nfit_kernfs)
2052 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs, 2080 nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
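
The new cmd_to_func() centralizes command-to-DSM-function translation: for ND_CMD_CALL it validates the family and reserved fields before trusting the packet's nd_command, for NVDIMM_FAMILY_INTEL it passes the Linux command number straight through, and for anything else it returns 0. Clearing bit 0 of dsm_mask at probe time is what makes 0 usable as a reject value, since function 0 is the DSM discovery function and never a valid user-invokable entry. Sketched with assumed values:

	unsigned long dsm_mask = 0x1e;	/* functions 1-4; bit 0 cleared at probe */
	int func = 0;			/* cmd_to_func()'s "invalid" result */

	if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
		return -ENOTTY;		/* always taken for func == 0 */
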
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 7496b10532aa..6a2185eb66c5 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -11,6 +11,7 @@
11#include <linux/kdev_t.h> 11#include <linux/kdev_t.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/namei.h>
14#include <linux/magic.h> 15#include <linux/magic.h>
15#include <linux/major.h> 16#include <linux/major.h>
16#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
@@ -20,6 +21,7 @@
20#include <linux/parser.h> 21#include <linux/parser.h>
21#include <linux/radix-tree.h> 22#include <linux/radix-tree.h>
22#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/seq_file.h>
23#include <linux/slab.h> 25#include <linux/slab.h>
24#include <linux/spinlock_types.h> 26#include <linux/spinlock_types.h>
25#include <linux/stddef.h> 27#include <linux/stddef.h>
@@ -30,7 +32,7 @@
30#include <linux/xarray.h> 32#include <linux/xarray.h>
31#include <uapi/asm-generic/errno-base.h> 33#include <uapi/asm-generic/errno-base.h>
32#include <uapi/linux/android/binder.h> 34#include <uapi/linux/android/binder.h>
33#include <uapi/linux/android/binder_ctl.h> 35#include <uapi/linux/android/binderfs.h>
34 36
35#include "binder_internal.h" 37#include "binder_internal.h"
36 38
@@ -39,14 +41,32 @@
39#define INODE_OFFSET 3 41#define INODE_OFFSET 3
40#define INTSTRLEN 21 42#define INTSTRLEN 21
41#define BINDERFS_MAX_MINOR (1U << MINORBITS) 43#define BINDERFS_MAX_MINOR (1U << MINORBITS)
42 44/* Ensure that the initial ipc namespace always has devices available. */
43static struct vfsmount *binderfs_mnt; 45#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
44 46
45static dev_t binderfs_dev; 47static dev_t binderfs_dev;
46static DEFINE_MUTEX(binderfs_minors_mutex); 48static DEFINE_MUTEX(binderfs_minors_mutex);
47static DEFINE_IDA(binderfs_minors); 49static DEFINE_IDA(binderfs_minors);
48 50
49/** 51/**
52 * binderfs_mount_opts - mount options for binderfs
53 * @max: maximum number of allocatable binderfs binder devices
54 */
55struct binderfs_mount_opts {
56 int max;
57};
58
59enum {
60 Opt_max,
61 Opt_err
62};
63
64static const match_table_t tokens = {
65 { Opt_max, "max=%d" },
66 { Opt_err, NULL }
67};
68
69/**
50 * binderfs_info - information about a binderfs mount 70 * binderfs_info - information about a binderfs mount
51 * @ipc_ns: The ipc namespace the binderfs mount belongs to. 71 * @ipc_ns: The ipc namespace the binderfs mount belongs to.
52 * @control_dentry: This records the dentry of this binderfs mount 72 * @control_dentry: This records the dentry of this binderfs mount
@@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors);
55 * created. 75 * created.
56 * @root_gid: gid that needs to be used when a new binder device is 76 * @root_gid: gid that needs to be used when a new binder device is
57 * created. 77 * created.
78 * @mount_opts: The mount options in use.
79 * @device_count: The current number of allocated binder devices.
58 */ 80 */
59struct binderfs_info { 81struct binderfs_info {
60 struct ipc_namespace *ipc_ns; 82 struct ipc_namespace *ipc_ns;
61 struct dentry *control_dentry; 83 struct dentry *control_dentry;
62 kuid_t root_uid; 84 kuid_t root_uid;
63 kgid_t root_gid; 85 kgid_t root_gid;
64 86 struct binderfs_mount_opts mount_opts;
87 int device_count;
65}; 88};
66 89
67static inline struct binderfs_info *BINDERFS_I(const struct inode *inode) 90static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
@@ -84,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode)
84 * @userp: buffer to copy information about new device for userspace to 107 * @userp: buffer to copy information about new device for userspace to
85 * @req: struct binderfs_device as copied from userspace 108 * @req: struct binderfs_device as copied from userspace
86 * 109 *
87 * This function allocated a new binder_device and reserves a new minor 110 * This function allocates a new binder_device and reserves a new minor
88 * number for it. 111 * number for it.
89 * Minor numbers are limited and tracked globally in binderfs_minors. The 112 * Minor numbers are limited and tracked globally in binderfs_minors. The
90 * function will stash a struct binder_device for the specific binder 113 * function will stash a struct binder_device for the specific binder
@@ -100,20 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
100 struct binderfs_device *req) 123 struct binderfs_device *req)
101{ 124{
102 int minor, ret; 125 int minor, ret;
103 struct dentry *dentry, *dup, *root; 126 struct dentry *dentry, *root;
104 struct binder_device *device; 127 struct binder_device *device;
105 size_t name_len = BINDERFS_MAX_NAME + 1;
106 char *name = NULL; 128 char *name = NULL;
129 size_t name_len;
107 struct inode *inode = NULL; 130 struct inode *inode = NULL;
108 struct super_block *sb = ref_inode->i_sb; 131 struct super_block *sb = ref_inode->i_sb;
109 struct binderfs_info *info = sb->s_fs_info; 132 struct binderfs_info *info = sb->s_fs_info;
133#if defined(CONFIG_IPC_NS)
134 bool use_reserve = (info->ipc_ns == &init_ipc_ns);
135#else
136 bool use_reserve = true;
137#endif
110 138
111 /* Reserve new minor number for the new device. */ 139 /* Reserve new minor number for the new device. */
112 mutex_lock(&binderfs_minors_mutex); 140 mutex_lock(&binderfs_minors_mutex);
113 minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); 141 if (++info->device_count <= info->mount_opts.max)
114 mutex_unlock(&binderfs_minors_mutex); 142 minor = ida_alloc_max(&binderfs_minors,
115 if (minor < 0) 143 use_reserve ? BINDERFS_MAX_MINOR :
144 BINDERFS_MAX_MINOR_CAPPED,
145 GFP_KERNEL);
146 else
147 minor = -ENOSPC;
148 if (minor < 0) {
149 --info->device_count;
150 mutex_unlock(&binderfs_minors_mutex);
116 return minor; 151 return minor;
152 }
153 mutex_unlock(&binderfs_minors_mutex);
117 154
118 ret = -ENOMEM; 155 ret = -ENOMEM;
119 device = kzalloc(sizeof(*device), GFP_KERNEL); 156 device = kzalloc(sizeof(*device), GFP_KERNEL);
@@ -132,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
132 inode->i_uid = info->root_uid; 169 inode->i_uid = info->root_uid;
133 inode->i_gid = info->root_gid; 170 inode->i_gid = info->root_gid;
134 171
135 name = kmalloc(name_len, GFP_KERNEL); 172 req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
173 name_len = strlen(req->name);
174 /* Make sure to include terminating NUL byte */
175 name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
136 if (!name) 176 if (!name)
137 goto err; 177 goto err;
138 178
139 strscpy(name, req->name, name_len);
140
141 device->binderfs_inode = inode; 179 device->binderfs_inode = inode;
142 device->context.binder_context_mgr_uid = INVALID_UID; 180 device->context.binder_context_mgr_uid = INVALID_UID;
143 device->context.name = name; 181 device->context.name = name;
@@ -156,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
156 194
157 root = sb->s_root; 195 root = sb->s_root;
158 inode_lock(d_inode(root)); 196 inode_lock(d_inode(root));
159 dentry = d_alloc_name(root, name); 197
160 if (!dentry) { 198 /* look it up */
199 dentry = lookup_one_len(name, root, name_len);
200 if (IS_ERR(dentry)) {
161 inode_unlock(d_inode(root)); 201 inode_unlock(d_inode(root));
162 ret = -ENOMEM; 202 ret = PTR_ERR(dentry);
163 goto err; 203 goto err;
164 } 204 }
165 205
166 /* Verify that the name userspace gave us is not already in use. */ 206 if (d_really_is_positive(dentry)) {
167 dup = d_lookup(root, &dentry->d_name); 207 /* already exists */
168 if (dup) { 208 dput(dentry);
169 if (d_really_is_positive(dup)) { 209 inode_unlock(d_inode(root));
170 dput(dup); 210 ret = -EEXIST;
171 dput(dentry); 211 goto err;
172 inode_unlock(d_inode(root));
173 ret = -EEXIST;
174 goto err;
175 }
176 dput(dup);
177 } 212 }
178 213
179 inode->i_private = device; 214 inode->i_private = device;
180 d_add(dentry, inode); 215 d_instantiate(dentry, inode);
181 fsnotify_create(root->d_inode, dentry); 216 fsnotify_create(root->d_inode, dentry);
182 inode_unlock(d_inode(root)); 217 inode_unlock(d_inode(root));
183 218
@@ -187,6 +222,7 @@ err:
187 kfree(name); 222 kfree(name);
188 kfree(device); 223 kfree(device);
189 mutex_lock(&binderfs_minors_mutex); 224 mutex_lock(&binderfs_minors_mutex);
225 --info->device_count;
190 ida_free(&binderfs_minors, minor); 226 ida_free(&binderfs_minors, minor);
191 mutex_unlock(&binderfs_minors_mutex); 227 mutex_unlock(&binderfs_minors_mutex);
192 iput(inode); 228 iput(inode);
@@ -232,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
232static void binderfs_evict_inode(struct inode *inode) 268static void binderfs_evict_inode(struct inode *inode)
233{ 269{
234 struct binder_device *device = inode->i_private; 270 struct binder_device *device = inode->i_private;
271 struct binderfs_info *info = BINDERFS_I(inode);
235 272
236 clear_inode(inode); 273 clear_inode(inode);
237 274
@@ -239,6 +276,7 @@ static void binderfs_evict_inode(struct inode *inode)
239 return; 276 return;
240 277
241 mutex_lock(&binderfs_minors_mutex); 278 mutex_lock(&binderfs_minors_mutex);
279 --info->device_count;
242 ida_free(&binderfs_minors, device->miscdev.minor); 280 ida_free(&binderfs_minors, device->miscdev.minor);
243 mutex_unlock(&binderfs_minors_mutex); 281 mutex_unlock(&binderfs_minors_mutex);
244 282
@@ -246,43 +284,87 @@ static void binderfs_evict_inode(struct inode *inode)
246 kfree(device); 284 kfree(device);
247} 285}
248 286
287/**
288 * binderfs_parse_mount_opts - parse binderfs mount options
289 * @data: options to set (can be NULL in which case defaults are used)
290 */
291static int binderfs_parse_mount_opts(char *data,
292 struct binderfs_mount_opts *opts)
293{
294 char *p;
295 opts->max = BINDERFS_MAX_MINOR;
296
297 while ((p = strsep(&data, ",")) != NULL) {
298 substring_t args[MAX_OPT_ARGS];
299 int token;
300 int max_devices;
301
302 if (!*p)
303 continue;
304
305 token = match_token(p, tokens, args);
306 switch (token) {
307 case Opt_max:
308 if (match_int(&args[0], &max_devices) ||
309 (max_devices < 0 ||
310 (max_devices > BINDERFS_MAX_MINOR)))
311 return -EINVAL;
312
313 opts->max = max_devices;
314 break;
315 default:
316 pr_err("Invalid mount options\n");
317 return -EINVAL;
318 }
319 }
320
321 return 0;
322}
323
324static int binderfs_remount(struct super_block *sb, int *flags, char *data)
325{
326 struct binderfs_info *info = sb->s_fs_info;
327 return binderfs_parse_mount_opts(data, &info->mount_opts);
328}
329
330static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
331{
332 struct binderfs_info *info;
333
334 info = root->d_sb->s_fs_info;
335 if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
336 seq_printf(seq, ",max=%d", info->mount_opts.max);
337
338 return 0;
339}
340
249static const struct super_operations binderfs_super_ops = { 341static const struct super_operations binderfs_super_ops = {
250 .statfs = simple_statfs, 342 .evict_inode = binderfs_evict_inode,
251 .evict_inode = binderfs_evict_inode, 343 .remount_fs = binderfs_remount,
344 .show_options = binderfs_show_mount_opts,
345 .statfs = simple_statfs,
252}; 346};
253 347
348static inline bool is_binderfs_control_device(const struct dentry *dentry)
349{
350 struct binderfs_info *info = dentry->d_sb->s_fs_info;
351 return info->control_dentry == dentry;
352}
353
254static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry, 354static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
255 struct inode *new_dir, struct dentry *new_dentry, 355 struct inode *new_dir, struct dentry *new_dentry,
256 unsigned int flags) 356 unsigned int flags)
257{ 357{
258 struct inode *inode = d_inode(old_dentry); 358 if (is_binderfs_control_device(old_dentry) ||
259 359 is_binderfs_control_device(new_dentry))
260 /* binderfs doesn't support directories. */
261 if (d_is_dir(old_dentry))
262 return -EPERM; 360 return -EPERM;
263 361
264 if (flags & ~RENAME_NOREPLACE) 362 return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
265 return -EINVAL;
266
267 if (!simple_empty(new_dentry))
268 return -ENOTEMPTY;
269
270 if (d_really_is_positive(new_dentry))
271 simple_unlink(new_dir, new_dentry);
272
273 old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
274 new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
275
276 return 0;
277} 363}
278 364
279static int binderfs_unlink(struct inode *dir, struct dentry *dentry) 365static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
280{ 366{
281 /* 367 if (is_binderfs_control_device(dentry))
282 * The control dentry is only ever touched during mount so checking it
283 * here should not require us to take lock.
284 */
285 if (BINDERFS_I(dir)->control_dentry == dentry)
286 return -EPERM; 368 return -EPERM;
287 369
288 return simple_unlink(dir, dentry); 370 return simple_unlink(dir, dentry);
@@ -318,8 +400,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
318 if (!device) 400 if (!device)
319 return -ENOMEM; 401 return -ENOMEM;
320 402
321 inode_lock(d_inode(root));
322
323 /* If we have already created a binder-control node, return. */ 403 /* If we have already created a binder-control node, return. */
324 if (info->control_dentry) { 404 if (info->control_dentry) {
325 ret = 0; 405 ret = 0;
@@ -358,12 +438,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
358 inode->i_private = device; 438 inode->i_private = device;
359 info->control_dentry = dentry; 439 info->control_dentry = dentry;
360 d_add(dentry, inode); 440 d_add(dentry, inode);
361 inode_unlock(d_inode(root));
362 441
363 return 0; 442 return 0;
364 443
365out: 444out:
366 inode_unlock(d_inode(root));
367 kfree(device); 445 kfree(device);
368 iput(inode); 446 iput(inode);
369 447
@@ -378,12 +456,9 @@ static const struct inode_operations binderfs_dir_inode_operations = {
378 456
379static int binderfs_fill_super(struct super_block *sb, void *data, int silent) 457static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
380{ 458{
459 int ret;
381 struct binderfs_info *info; 460 struct binderfs_info *info;
382 int ret = -ENOMEM;
383 struct inode *inode = NULL; 461 struct inode *inode = NULL;
384 struct ipc_namespace *ipc_ns = sb->s_fs_info;
385
386 get_ipc_ns(ipc_ns);
387 462
388 sb->s_blocksize = PAGE_SIZE; 463 sb->s_blocksize = PAGE_SIZE;
389 sb->s_blocksize_bits = PAGE_SHIFT; 464 sb->s_blocksize_bits = PAGE_SHIFT;
@@ -405,11 +480,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
405 sb->s_op = &binderfs_super_ops; 480 sb->s_op = &binderfs_super_ops;
406 sb->s_time_gran = 1; 481 sb->s_time_gran = 1;
407 482
408 info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); 483 sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
409 if (!info) 484 if (!sb->s_fs_info)
410 goto err_without_dentry; 485 return -ENOMEM;
486 info = sb->s_fs_info;
487
488 info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
489
490 ret = binderfs_parse_mount_opts(data, &info->mount_opts);
491 if (ret)
492 return ret;
411 493
412 info->ipc_ns = ipc_ns;
413 info->root_gid = make_kgid(sb->s_user_ns, 0); 494 info->root_gid = make_kgid(sb->s_user_ns, 0);
414 if (!gid_valid(info->root_gid)) 495 if (!gid_valid(info->root_gid))
415 info->root_gid = GLOBAL_ROOT_GID; 496 info->root_gid = GLOBAL_ROOT_GID;
@@ -417,11 +498,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
417 if (!uid_valid(info->root_uid)) 498 if (!uid_valid(info->root_uid))
418 info->root_uid = GLOBAL_ROOT_UID; 499 info->root_uid = GLOBAL_ROOT_UID;
419 500
420 sb->s_fs_info = info;
421
422 inode = new_inode(sb); 501 inode = new_inode(sb);
423 if (!inode) 502 if (!inode)
424 goto err_without_dentry; 503 return -ENOMEM;
425 504
426 inode->i_ino = FIRST_INODE; 505 inode->i_ino = FIRST_INODE;
427 inode->i_fop = &simple_dir_operations; 506 inode->i_fop = &simple_dir_operations;
@@ -432,79 +511,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
432 511
433 sb->s_root = d_make_root(inode); 512 sb->s_root = d_make_root(inode);
434 if (!sb->s_root) 513 if (!sb->s_root)
435 goto err_without_dentry; 514 return -ENOMEM;
436
437 ret = binderfs_binder_ctl_create(sb);
438 if (ret)
439 goto err_with_dentry;
440
441 return 0;
442
443err_with_dentry:
444 dput(sb->s_root);
445 sb->s_root = NULL;
446
447err_without_dentry:
448 put_ipc_ns(ipc_ns);
449 iput(inode);
450 kfree(info);
451
452 return ret;
453}
454
455static int binderfs_test_super(struct super_block *sb, void *data)
456{
457 struct binderfs_info *info = sb->s_fs_info;
458
459 if (info)
460 return info->ipc_ns == data;
461
462 return 0;
463}
464 515
465static int binderfs_set_super(struct super_block *sb, void *data) 516 return binderfs_binder_ctl_create(sb);
466{
467 sb->s_fs_info = data;
468 return set_anon_super(sb, NULL);
469} 517}
470 518
471static struct dentry *binderfs_mount(struct file_system_type *fs_type, 519static struct dentry *binderfs_mount(struct file_system_type *fs_type,
472 int flags, const char *dev_name, 520 int flags, const char *dev_name,
473 void *data) 521 void *data)
474{ 522{
475 struct super_block *sb; 523 return mount_nodev(fs_type, flags, data, binderfs_fill_super);
476 struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
477
478 if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN))
479 return ERR_PTR(-EPERM);
480
481 sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super,
482 flags, ipc_ns->user_ns, ipc_ns);
483 if (IS_ERR(sb))
484 return ERR_CAST(sb);
485
486 if (!sb->s_root) {
487 int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
488 if (ret) {
489 deactivate_locked_super(sb);
490 return ERR_PTR(ret);
491 }
492
493 sb->s_flags |= SB_ACTIVE;
494 }
495
496 return dget(sb->s_root);
497} 524}
498 525
499static void binderfs_kill_super(struct super_block *sb) 526static void binderfs_kill_super(struct super_block *sb)
500{ 527{
501 struct binderfs_info *info = sb->s_fs_info; 528 struct binderfs_info *info = sb->s_fs_info;
502 529
530 kill_litter_super(sb);
531
503 if (info && info->ipc_ns) 532 if (info && info->ipc_ns)
504 put_ipc_ns(info->ipc_ns); 533 put_ipc_ns(info->ipc_ns);
505 534
506 kfree(info); 535 kfree(info);
507 kill_litter_super(sb);
508} 536}
509 537
510static struct file_system_type binder_fs_type = { 538static struct file_system_type binder_fs_type = {
@@ -530,14 +558,6 @@ static int __init init_binderfs(void)
530 return ret; 558 return ret;
531 } 559 }
532 560
533 binderfs_mnt = kern_mount(&binder_fs_type);
534 if (IS_ERR(binderfs_mnt)) {
535 ret = PTR_ERR(binderfs_mnt);
536 binderfs_mnt = NULL;
537 unregister_filesystem(&binder_fs_type);
538 unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
539 }
540
541 return ret; 561 return ret;
542} 562}
543 563
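
With the max= option in place, a binderfs mount can cap how many binder devices its users may allocate, and the cap can be adjusted on remount. A hypothetical userspace sketch (the filesystem type name "binder" and the mount point are assumptions for illustration):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Cap this binderfs instance at 4096 allocatable devices. */
		if (mount("binder", "/dev/binderfs", "binder", 0, "max=4096")) {
			perror("mount");
			return 1;
		}
		return 0;
	}
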
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 8cc9c429ad95..9e7fc302430f 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -915,6 +915,10 @@ static struct scsi_host_template pata_macio_sht = {
915 .sg_tablesize = MAX_DCMDS, 915 .sg_tablesize = MAX_DCMDS,
916 /* We may not need that strict one */ 916 /* We may not need that strict one */
917 .dma_boundary = ATA_DMA_BOUNDARY, 917 .dma_boundary = ATA_DMA_BOUNDARY,
918 /* Not sure what the real max is but we know it's less than 64K, let's
919 * use 64K minus 256
920 */
921 .max_segment_size = MAX_DBDMA_SEG,
918 .slave_configure = pata_macio_slave_config, 922 .slave_configure = pata_macio_slave_config,
919}; 923};
920 924
@@ -1044,11 +1048,6 @@ static int pata_macio_common_init(struct pata_macio_priv *priv,
1044 /* Make sure we have sane initial timings in the cache */ 1048 /* Make sure we have sane initial timings in the cache */
1045 pata_macio_default_timings(priv); 1049 pata_macio_default_timings(priv);
1046 1050
1047 /* Not sure what the real max is but we know it's less than 64K, let's
1048 * use 64K minus 256
1049 */
1050 dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
1051
1052 /* Allocate libata host for 1 port */ 1051 /* Allocate libata host for 1 port */
1053 memset(&pinfo, 0, sizeof(struct ata_port_info)); 1052 memset(&pinfo, 0, sizeof(struct ata_port_info));
1054 pmac_macio_calc_timing_masks(priv, &pinfo); 1053 pmac_macio_calc_timing_masks(priv, &pinfo);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e0bcf9b2dab0..174e84ce4379 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -245,8 +245,15 @@ struct inic_port_priv {
245 245
246static struct scsi_host_template inic_sht = { 246static struct scsi_host_template inic_sht = {
247 ATA_BASE_SHT(DRV_NAME), 247 ATA_BASE_SHT(DRV_NAME),
248 .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */ 248 .sg_tablesize = LIBATA_MAX_PRD, /* maybe it can be larger? */
249 .dma_boundary = INIC_DMA_BOUNDARY, 249
250 /*
251 * This controller is braindamaged. dma_boundary is 0xffff like others
252 * but it will lock up the whole machine HARD if 65536 byte PRD entry
253 * is fed. Reduce maximum segment size.
254 */
255 .dma_boundary = INIC_DMA_BOUNDARY,
256 .max_segment_size = 65536 - 512,
250}; 257};
251 258
252static const int scr_map[] = { 259static const int scr_map[] = {
@@ -868,17 +875,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
868 return rc; 875 return rc;
869 } 876 }
870 877
871 /*
872 * This controller is braindamaged. dma_boundary is 0xffff
873 * like others but it will lock up the whole machine HARD if
874 * 65536 byte PRD entry is fed. Reduce maximum segment size.
875 */
876 rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
877 if (rc) {
878 dev_err(&pdev->dev, "failed to set the maximum segment size\n");
879 return rc;
880 }
881
882 rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl); 878 rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
883 if (rc) { 879 if (rc) {
884 dev_err(&pdev->dev, "failed to initialize controller\n"); 880 dev_err(&pdev->dev, "failed to initialize controller\n");
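
Both ATA hunks move a DMA segment-size limit out of a probe-time dma_set_max_seg_size() call and into the scsi_host_template, letting the SCSI midlayer apply it when it configures each host's queue limits instead of the driver mutating the shared struct device DMA parameters. The shape of the change, with illustrative values:

	static struct scsi_host_template example_sht = {
		ATA_BASE_SHT("example"),
		.sg_tablesize		= LIBATA_MAX_PRD,
		.dma_boundary		= ATA_DMA_BOUNDARY,
		/* enforced per host by the block layer, replacing
		 * dma_set_max_seg_size() in the probe path */
		.max_segment_size	= 65536 - 512,
	};
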
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 457be03b744d..0ea2139c50d8 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
130{ 130{
131 int autosuspend_delay; 131 int autosuspend_delay;
132 u64 last_busy, expires = 0; 132 u64 last_busy, expires = 0;
133 u64 now = ktime_to_ns(ktime_get()); 133 u64 now = ktime_get_mono_fast_ns();
134 134
135 if (!dev->power.use_autosuspend) 135 if (!dev->power.use_autosuspend)
136 goto out; 136 goto out;
@@ -909,7 +909,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
909 * If 'expires' is after the current time, we've been called 909 * If 'expires' is after the current time, we've been called
910 * too early. 910 * too early.
911 */ 911 */
912 if (expires > 0 && expires < ktime_to_ns(ktime_get())) { 912 if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
913 dev->power.timer_expires = 0; 913 dev->power.timer_expires = 0;
914 rpm_suspend(dev, dev->power.timer_autosuspends ? 914 rpm_suspend(dev, dev->power.timer_autosuspends ?
915 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); 915 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -928,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
928int pm_schedule_suspend(struct device *dev, unsigned int delay) 928int pm_schedule_suspend(struct device *dev, unsigned int delay)
929{ 929{
930 unsigned long flags; 930 unsigned long flags;
931 ktime_t expires; 931 u64 expires;
932 int retval; 932 int retval;
933 933
934 spin_lock_irqsave(&dev->power.lock, flags); 934 spin_lock_irqsave(&dev->power.lock, flags);
@@ -945,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
945 /* Other scheduled or pending requests need to be canceled. */ 945 /* Other scheduled or pending requests need to be canceled. */
946 pm_runtime_cancel_pending(dev); 946 pm_runtime_cancel_pending(dev);
947 947
948 expires = ktime_add(ktime_get(), ms_to_ktime(delay)); 948 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
949 dev->power.timer_expires = ktime_to_ns(expires); 949 dev->power.timer_expires = expires;
950 dev->power.timer_autosuspends = 0; 950 dev->power.timer_autosuspends = 0;
951 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); 951 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
952 952
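
Switching from ktime_to_ns(ktime_get()) to ktime_get_mono_fast_ns() reads the monotonic fast clock, which is usable from any context (including NMI) and skips the seqcount retry loop, at the cost of a small cross-CPU skew that does not matter for autosuspend deadlines. The expiry arithmetic then stays in plain u64 nanoseconds:

	u64 now = ktime_get_mono_fast_ns();
	u64 expires = now + (u64)delay * NSEC_PER_MSEC;	/* 'delay' is in ms */
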
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a74ce885b541..c518659b4d9f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -32,6 +32,7 @@
32#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
33#include <linux/workqueue.h> 33#include <linux/workqueue.h>
34#include <linux/uuid.h> 34#include <linux/uuid.h>
35#include <linux/nospec.h>
35 36
36#define IPMI_DRIVER_VERSION "39.2" 37#define IPMI_DRIVER_VERSION "39.2"
37 38
@@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
62{ } 63{ }
63#endif 64#endif
64 65
65static int initialized; 66static bool initialized;
67static bool drvregistered;
66 68
67enum ipmi_panic_event_op { 69enum ipmi_panic_event_op {
68 IPMI_SEND_PANIC_EVENT_NONE, 70 IPMI_SEND_PANIC_EVENT_NONE,
@@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
612 614
613static LIST_HEAD(ipmi_interfaces); 615static LIST_HEAD(ipmi_interfaces);
614static DEFINE_MUTEX(ipmi_interfaces_mutex); 616static DEFINE_MUTEX(ipmi_interfaces_mutex);
615DEFINE_STATIC_SRCU(ipmi_interfaces_srcu); 617struct srcu_struct ipmi_interfaces_srcu;
616 618
617/* 619/*
618 * List of watchers that want to know when smi's are added and deleted. 620 * List of watchers that want to know when smi's are added and deleted.
@@ -720,7 +722,15 @@ struct watcher_entry {
720int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) 722int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
721{ 723{
722 struct ipmi_smi *intf; 724 struct ipmi_smi *intf;
723 int index; 725 int index, rv;
726
727 /*
728 * Make sure the driver is actually initialized, this handles
729 * problems with initialization order.
730 */
731 rv = ipmi_init_msghandler();
732 if (rv)
733 return rv;
724 734
725 mutex_lock(&smi_watchers_mutex); 735 mutex_lock(&smi_watchers_mutex);
726 736
@@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
884 894
885 if (user) { 895 if (user) {
886 user->handler->ipmi_recv_hndl(msg, user->handler_data); 896 user->handler->ipmi_recv_hndl(msg, user->handler_data);
887 release_ipmi_user(msg->user, index); 897 release_ipmi_user(user, index);
888 } else { 898 } else {
889 /* User went away, give up. */ 899 /* User went away, give up. */
890 ipmi_free_recv_msg(msg); 900 ipmi_free_recv_msg(msg);
@@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int if_num,
1076{ 1086{
1077 unsigned long flags; 1087 unsigned long flags;
1078 struct ipmi_user *new_user; 1088 struct ipmi_user *new_user;
1079 int rv = 0, index; 1089 int rv, index;
1080 struct ipmi_smi *intf; 1090 struct ipmi_smi *intf;
1081 1091
1082 /* 1092 /*
@@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int if_num,
1094 * Make sure the driver is actually initialized, this handles 1104 * Make sure the driver is actually initialized, this handles
1095 * problems with initialization order. 1105 * problems with initialization order.
1096 */ 1106 */
1097 if (!initialized) { 1107 rv = ipmi_init_msghandler();
1098 rv = ipmi_init_msghandler(); 1108 if (rv)
1099 if (rv) 1109 return rv;
1100 return rv;
1101
1102 /*
1103 * The init code doesn't return an error if it was turned
1104 * off, but it won't initialize. Check that.
1105 */
1106 if (!initialized)
1107 return -ENODEV;
1108 }
1109 1110
1110 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL); 1111 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
1111 if (!new_user) 1112 if (!new_user)
@@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
1183static void free_user(struct kref *ref) 1184static void free_user(struct kref *ref)
1184{ 1185{
1185 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount); 1186 struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
1187 cleanup_srcu_struct(&user->release_barrier);
1186 kfree(user); 1188 kfree(user);
1187} 1189}
1188 1190
@@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
1259{ 1261{
1260 _ipmi_destroy_user(user); 1262 _ipmi_destroy_user(user);
1261 1263
1262 cleanup_srcu_struct(&user->release_barrier);
1263 kref_put(&user->refcount, free_user); 1264 kref_put(&user->refcount, free_user);
1264 1265
1265 return 0; 1266 return 0;
@@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
1298 if (!user) 1299 if (!user)
1299 return -ENODEV; 1300 return -ENODEV;
1300 1301
1301 if (channel >= IPMI_MAX_CHANNELS) 1302 if (channel >= IPMI_MAX_CHANNELS) {
1302 rv = -EINVAL; 1303 rv = -EINVAL;
1303 else 1304 } else {
1305 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1304 user->intf->addrinfo[channel].address = address; 1306 user->intf->addrinfo[channel].address = address;
1307 }
1305 release_ipmi_user(user, index); 1308 release_ipmi_user(user, index);
1306 1309
1307 return rv; 1310 return rv;
@@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
1318 if (!user) 1321 if (!user)
1319 return -ENODEV; 1322 return -ENODEV;
1320 1323
1321 if (channel >= IPMI_MAX_CHANNELS) 1324 if (channel >= IPMI_MAX_CHANNELS) {
1322 rv = -EINVAL; 1325 rv = -EINVAL;
1323 else 1326 } else {
1327 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1324 *address = user->intf->addrinfo[channel].address; 1328 *address = user->intf->addrinfo[channel].address;
1329 }
1325 release_ipmi_user(user, index); 1330 release_ipmi_user(user, index);
1326 1331
1327 return rv; 1332 return rv;
@@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
1338 if (!user) 1343 if (!user)
1339 return -ENODEV; 1344 return -ENODEV;
1340 1345
1341 if (channel >= IPMI_MAX_CHANNELS) 1346 if (channel >= IPMI_MAX_CHANNELS) {
1342 rv = -EINVAL; 1347 rv = -EINVAL;
1343 else 1348 } else {
1349 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1344 user->intf->addrinfo[channel].lun = LUN & 0x3; 1350 user->intf->addrinfo[channel].lun = LUN & 0x3;
1351 }
1345 release_ipmi_user(user, index); 1352 release_ipmi_user(user, index);
1346 1353
1347 return rv; 1354 return rv;
@@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
1358 if (!user) 1365 if (!user)
1359 return -ENODEV; 1366 return -ENODEV;
1360 1367
1361 if (channel >= IPMI_MAX_CHANNELS) 1368 if (channel >= IPMI_MAX_CHANNELS) {
1362 rv = -EINVAL; 1369 rv = -EINVAL;
1363 else 1370 } else {
1371 channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
1364 *address = user->intf->addrinfo[channel].lun; 1372 *address = user->intf->addrinfo[channel].lun;
1373 }
1365 release_ipmi_user(user, index); 1374 release_ipmi_user(user, index);
1366 1375
1367 return rv; 1376 return rv;
@@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf,
2184{ 2193{
2185 if (addr->channel >= IPMI_MAX_CHANNELS) 2194 if (addr->channel >= IPMI_MAX_CHANNELS)
2186 return -EINVAL; 2195 return -EINVAL;
2196 addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
2187 *lun = intf->addrinfo[addr->channel].lun; 2197 *lun = intf->addrinfo[addr->channel].lun;
2188 *saddr = intf->addrinfo[addr->channel].address; 2198 *saddr = intf->addrinfo[addr->channel].address;
2189 return 0; 2199 return 0;
@@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
3291 * Make sure the driver is actually initialized, this handles 3301 * Make sure the driver is actually initialized, this handles
3292 * problems with initialization order. 3302 * problems with initialization order.
3293 */ 3303 */
3294 if (!initialized) { 3304 rv = ipmi_init_msghandler();
3295 rv = ipmi_init_msghandler(); 3305 if (rv)
3296 if (rv) 3306 return rv;
3297 return rv;
3298 /*
3299 * The init code doesn't return an error if it was turned
3300 * off, but it won't initialize. Check that.
3301 */
3302 if (!initialized)
3303 return -ENODEV;
3304 }
3305 3307
3306 intf = kzalloc(sizeof(*intf), GFP_KERNEL); 3308 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
3307 if (!intf) 3309 if (!intf)
@@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this,
5017 return NOTIFY_DONE; 5019 return NOTIFY_DONE;
5018} 5020}
5019 5021
5022/* Must be called with ipmi_interfaces_mutex held. */
5023static int ipmi_register_driver(void)
5024{
5025 int rv;
5026
5027 if (drvregistered)
5028 return 0;
5029
5030 rv = driver_register(&ipmidriver.driver);
5031 if (rv)
5032 pr_err("Could not register IPMI driver\n");
5033 else
5034 drvregistered = true;
5035 return rv;
5036}
5037
5020static struct notifier_block panic_block = { 5038static struct notifier_block panic_block = {
5021 .notifier_call = panic_event, 5039 .notifier_call = panic_event,
5022 .next = NULL, 5040 .next = NULL,
@@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void)
5027{ 5045{
5028 int rv; 5046 int rv;
5029 5047
5048 mutex_lock(&ipmi_interfaces_mutex);
5049 rv = ipmi_register_driver();
5050 if (rv)
5051 goto out;
5030 if (initialized) 5052 if (initialized)
5031 return 0; 5053 goto out;
5032
5033 rv = driver_register(&ipmidriver.driver);
5034 if (rv) {
5035 pr_err("Could not register IPMI driver\n");
5036 return rv;
5037 }
5038 5054
5039 pr_info("version " IPMI_DRIVER_VERSION "\n"); 5055 init_srcu_struct(&ipmi_interfaces_srcu);
5040 5056
5041 timer_setup(&ipmi_timer, ipmi_timeout, 0); 5057 timer_setup(&ipmi_timer, ipmi_timeout, 0);
5042 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 5058 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5043 5059
5044 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 5060 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5045 5061
5046 initialized = 1; 5062 initialized = true;
5047 5063
5048 return 0; 5064out:
5065 mutex_unlock(&ipmi_interfaces_mutex);
5066 return rv;
5049} 5067}
5050 5068
5051static int __init ipmi_init_msghandler_mod(void) 5069static int __init ipmi_init_msghandler_mod(void)
5052{ 5070{
5053 ipmi_init_msghandler(); 5071 int rv;
5054 return 0; 5072
5073 pr_info("version " IPMI_DRIVER_VERSION "\n");
5074
5075 mutex_lock(&ipmi_interfaces_mutex);
5076 rv = ipmi_register_driver();
5077 mutex_unlock(&ipmi_interfaces_mutex);
5078
5079 return rv;
5055} 5080}
5056 5081
5057static void __exit cleanup_ipmi(void) 5082static void __exit cleanup_ipmi(void)
5058{ 5083{
5059 int count; 5084 int count;
5060 5085
5061 if (!initialized) 5086 if (initialized) {
5062 return; 5087 atomic_notifier_chain_unregister(&panic_notifier_list,
5063 5088 &panic_block);
5064 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
5065 5089
5066 /* 5090 /*
5067 * This can't be called if any interfaces exist, so no worry 5091 * This can't be called if any interfaces exist, so no worry
5068 * about shutting down the interfaces. 5092 * about shutting down the interfaces.
5069 */ 5093 */
5070 5094
5071 /* 5095 /*
5072 * Tell the timer to stop, then wait for it to stop. This 5096 * Tell the timer to stop, then wait for it to stop. This
5073 * avoids problems with race conditions removing the timer 5097 * avoids problems with race conditions removing the timer
5074 * here. 5098 * here.
5075 */ 5099 */
5076 atomic_inc(&stop_operation); 5100 atomic_inc(&stop_operation);
5077 del_timer_sync(&ipmi_timer); 5101 del_timer_sync(&ipmi_timer);
5078 5102
5079 driver_unregister(&ipmidriver.driver); 5103 initialized = false;
5080 5104
5081 initialized = 0; 5105 /* Check for buffer leaks. */
5106 count = atomic_read(&smi_msg_inuse_count);
5107 if (count != 0)
5108 pr_warn("SMI message count %d at exit\n", count);
5109 count = atomic_read(&recv_msg_inuse_count);
5110 if (count != 0)
5111 pr_warn("recv message count %d at exit\n", count);
5082 5112
5083 /* Check for buffer leaks. */ 5113 cleanup_srcu_struct(&ipmi_interfaces_srcu);
5084 count = atomic_read(&smi_msg_inuse_count); 5114 }
5085 if (count != 0) 5115 if (drvregistered)
5086 pr_warn("SMI message count %d at exit\n", count); 5116 driver_unregister(&ipmidriver.driver);
5087 count = atomic_read(&recv_msg_inuse_count);
5088 if (count != 0)
5089 pr_warn("recv message count %d at exit\n", count);
5090} 5117}
5091module_exit(cleanup_ipmi); 5118module_exit(cleanup_ipmi);
5092 5119
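
The channel clamps above follow the standard Spectre-v1 pattern: after the range check, array_index_nospec() forces the index toward zero on a mispredicted path so a speculative out-of-bounds load cannot leak data; the mwavedd hunks below apply the same fix to ipcnum. A minimal sketch, assuming kernel context:

	#include <linux/nospec.h>

	if (channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	/* The CPU may speculate past the branch; clamp the index so the
	 * dependent load below stays in bounds even then. */
	channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
	lun = intf->addrinfo[channel].lun;
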
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index ca9528c4f183..b7a1ae2afaea 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
632 632
633 /* Remove the multi-part read marker. */ 633 /* Remove the multi-part read marker. */
634 len -= 2; 634 len -= 2;
635 data += 2;
635 for (i = 0; i < len; i++) 636 for (i = 0; i < len; i++)
636 ssif_info->data[i] = data[i+2]; 637 ssif_info->data[i] = data[i];
637 ssif_info->multi_len = len; 638 ssif_info->multi_len = len;
638 ssif_info->multi_pos = 1; 639 ssif_info->multi_pos = 1;
639 640
@@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
661 } 662 }
662 663
663 blocknum = data[0]; 664 blocknum = data[0];
665 len--;
666 data++;
667
668 if (blocknum != 0xff && len != 31) {
669 /* All blocks but the last must have 31 data bytes. */
670 result = -EIO;
671 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
672 pr_info("Received middle message <31\n");
664 673
665 if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { 674 goto continue_op;
675 }
676
677 if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
666 /* Received message too big, abort the operation. */ 678 /* Received message too big, abort the operation. */
667 result = -E2BIG; 679 result = -E2BIG;
668 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) 680 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
671 goto continue_op; 683 goto continue_op;
672 } 684 }
673 685
674 /* Remove the blocknum from the data. */
675 len--;
676 for (i = 0; i < len; i++) 686 for (i = 0; i < len; i++)
677 ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; 687 ssif_info->data[i + ssif_info->multi_len] = data[i];
678 ssif_info->multi_len += len; 688 ssif_info->multi_len += len;
679 if (blocknum == 0xff) { 689 if (blocknum == 0xff) {
680 /* End of read */ 690 /* End of read */
681 len = ssif_info->multi_len; 691 len = ssif_info->multi_len;
682 data = ssif_info->data; 692 data = ssif_info->data;
683 } else if (blocknum + 1 != ssif_info->multi_pos) { 693 } else if (blocknum != ssif_info->multi_pos) {
684 /* 694 /*
685 * Out of sequence block, just abort. Block 695 * Out of sequence block, just abort. Block
686 * numbers start at zero for the second block, 696 * numbers start at zero for the second block,
@@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
707 } 717 }
708 } 718 }
709 719
720 continue_op:
710 if (result < 0) { 721 if (result < 0) {
711 ssif_inc_stat(ssif_info, receive_errors); 722 ssif_inc_stat(ssif_info, receive_errors);
712 } else { 723 } else {
@@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
714 ssif_inc_stat(ssif_info, received_message_parts); 725 ssif_inc_stat(ssif_info, received_message_parts);
715 } 726 }
716 727
717
718 continue_op:
719 if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) 728 if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
720 pr_info("DONE 1: state = %d, result=%d\n", 729 pr_info("DONE 1: state = %d, result=%d\n",
721 ssif_info->ssif_state, result); 730 ssif_info->ssif_state, result);
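
The SSIF rework strips the block-number byte from len and data before any length checks, so the accounting sees only payload bytes: every middle block must carry exactly 31 of them, the running total is bounded by IPMI_MAX_MSG_LENGTH before copying, and sequencing compares blocknum directly against multi_pos. A simplified sketch of the reassembly accounting:

	blocknum = data[0];	/* first byte is the block number */
	len--;
	data++;

	if (blocknum != 0xff && len != 31)
		return -EIO;	/* short middle block */
	if (multi_len + len > IPMI_MAX_MSG_LENGTH)
		return -E2BIG;	/* would overflow the buffer */
	memcpy(msgbuf + multi_len, data, len);
	multi_len += len;
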
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index b5e3103c1175..e43c876a9223 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -59,6 +59,7 @@
59#include <linux/mutex.h> 59#include <linux/mutex.h>
60#include <linux/delay.h> 60#include <linux/delay.h>
61#include <linux/serial_8250.h> 61#include <linux/serial_8250.h>
62#include <linux/nospec.h>
62#include "smapi.h" 63#include "smapi.h"
63#include "mwavedd.h" 64#include "mwavedd.h"
64#include "3780i.h" 65#include "3780i.h"
@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
289 ipcnum); 290 ipcnum);
290 return -EINVAL; 291 return -EINVAL;
291 } 292 }
293 ipcnum = array_index_nospec(ipcnum,
294 ARRAY_SIZE(pDrvData->IPCs));
292 PRINTK_3(TRACE_MWAVE, 295 PRINTK_3(TRACE_MWAVE,
293 "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" 296 "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
294 " ipcnum %x entry usIntCount %x\n", 297 " ipcnum %x entry usIntCount %x\n",
@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
317 " Invalid ipcnum %x\n", ipcnum); 320 " Invalid ipcnum %x\n", ipcnum);
318 return -EINVAL; 321 return -EINVAL;
319 } 322 }
323 ipcnum = array_index_nospec(ipcnum,
324 ARRAY_SIZE(pDrvData->IPCs));
320 PRINTK_3(TRACE_MWAVE, 325 PRINTK_3(TRACE_MWAVE,
321 "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" 326 "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
322 " ipcnum %x, usIntCount %x\n", 327 " ipcnum %x, usIntCount %x\n",
@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
383 ipcnum); 388 ipcnum);
384 return -EINVAL; 389 return -EINVAL;
385 } 390 }
391 ipcnum = array_index_nospec(ipcnum,
392 ARRAY_SIZE(pDrvData->IPCs));
386 mutex_lock(&mwave_mutex); 393 mutex_lock(&mwave_mutex);
387 if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { 394 if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
388 pDrvData->IPCs[ipcnum].bIsEnabled = false; 395 pDrvData->IPCs[ipcnum].bIsEnabled = false;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e5b2fe80eab4..d2f0bb5ba47e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -293,7 +293,6 @@ config COMMON_CLK_BD718XX
293source "drivers/clk/actions/Kconfig" 293source "drivers/clk/actions/Kconfig"
294source "drivers/clk/bcm/Kconfig" 294source "drivers/clk/bcm/Kconfig"
295source "drivers/clk/hisilicon/Kconfig" 295source "drivers/clk/hisilicon/Kconfig"
296source "drivers/clk/imx/Kconfig"
297source "drivers/clk/imgtec/Kconfig" 296source "drivers/clk/imgtec/Kconfig"
298source "drivers/clk/imx/Kconfig" 297source "drivers/clk/imx/Kconfig"
299source "drivers/clk/ingenic/Kconfig" 298source "drivers/clk/ingenic/Kconfig"
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 5b393e711e94..7d16ab0784ec 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
262 262
263 if (vc5->clk_mux_ins == VC5_MUX_IN_XIN) 263 if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
264 src = VC5_PRIM_SRC_SHDN_EN_XTAL; 264 src = VC5_PRIM_SRC_SHDN_EN_XTAL;
265 if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN) 265 else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
266 src = VC5_PRIM_SRC_SHDN_EN_CLKIN; 266 src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
267 else /* Invalid; should have been caught by vc5_probe() */
268 return -EINVAL;
267 } 269 }
268 270
269 return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src); 271 return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
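
Restructuring the two independent ifs into an if/else-if chain with an explicit -EINVAL fallback guarantees src is assigned before regmap_update_bits() runs, even if clk_mux_ins somehow holds a value vc5_probe() did not vet. Schematically (identifiers are placeholders):

	u32 src;

	if (sel == SEL_A)
		src = SRC_A;
	else if (sel == SEL_B)
		src = SRC_B;
	else
		return -EINVAL;	/* never write an indeterminate src */
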
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 75d13c0eff12..d2477a5058ac 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1513,9 +1513,19 @@ static int clk_fetch_parent_index(struct clk_core *core,
1513 if (!parent) 1513 if (!parent)
1514 return -EINVAL; 1514 return -EINVAL;
1515 1515
1516 for (i = 0; i < core->num_parents; i++) 1516 for (i = 0; i < core->num_parents; i++) {
1517 if (clk_core_get_parent_by_index(core, i) == parent) 1517 if (core->parents[i] == parent)
1518 return i;
1519
1520 if (core->parents[i])
1521 continue;
1522
1523 /* Fallback to comparing globally unique names */
1524 if (!strcmp(parent->name, core->parent_names[i])) {
1525 core->parents[i] = parent;
1518 return i; 1526 return i;
1527 }
1528 }
1519 1529
1520 return -EINVAL; 1530 return -EINVAL;
1521} 1531}
@@ -2779,7 +2789,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2779 seq_printf(s, "\"protect_count\": %d,", c->protect_count); 2789 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
2780 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); 2790 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2781 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); 2791 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
2782 seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); 2792 seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
2783 seq_printf(s, "\"duty_cycle\": %u", 2793 seq_printf(s, "\"duty_cycle\": %u",
2784 clk_core_get_scaled_duty_cycle(c, 100000)); 2794 clk_core_get_scaled_duty_cycle(c, 100000));
2785} 2795}
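
clk_fetch_parent_index() now tries the cached parents[] pointers first and, for slots that were never resolved, falls back to comparing globally unique clock names, caching the pointer on a match; this lets a child find a parent that was registered after it. (The clk_dump_one() hunk simply restores the comma so the debugfs JSON stays well-formed.) The lazy-resolution pattern, sketched with generic names:

	for (i = 0; i < n; i++) {
		if (cache[i] == target)
			return i;		/* already resolved to this parent */
		if (cache[i])
			continue;		/* resolved, but to someone else */
		if (!strcmp(target->name, names[i])) {
			cache[i] = target;	/* resolve lazily by unique name */
			return i;
		}
	}
	return -EINVAL;
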
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 0026c3969b1e..76b9eb15604e 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
155{ 155{
156 struct clk_frac_pll *pll = to_clk_frac_pll(hw); 156 struct clk_frac_pll *pll = to_clk_frac_pll(hw);
157 u32 val, divfi, divff; 157 u32 val, divfi, divff;
158 u64 temp64 = parent_rate; 158 u64 temp64;
159 int ret; 159 int ret;
160 160
161 parent_rate *= 8; 161 parent_rate *= 8;
162 rate *= 2; 162 rate *= 2;
163 divfi = rate / parent_rate; 163 divfi = rate / parent_rate;
164 temp64 *= rate - divfi; 164 temp64 = parent_rate * divfi;
165 temp64 = rate - temp64;
165 temp64 *= PLL_FRAC_DENOM; 166 temp64 *= PLL_FRAC_DENOM;
166 do_div(temp64, parent_rate); 167 do_div(temp64, parent_rate);
167 divff = temp64; 168 divff = temp64;
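
The old code multiplied by (rate - divfi), subtracting a divider value from a frequency; the fractional numerator has to be the remainder rate - parent_rate * divfi. With assumed small numbers for illustration: parent_rate = 8 and rate = 30 give divfi = 30 / 8 = 3 and a remainder of 30 - 24 = 6, so divff = 6 * PLL_FRAC_DENOM / 8, whereas the old math would have used 30 - 3 = 27. The fixed sequence:

	u32 divfi = rate / parent_rate;		/* integer part of the divider */
	u64 temp64 = (u64)parent_rate * divfi;

	temp64 = rate - temp64;			/* remainder, not rate - divfi */
	temp64 *= PLL_FRAC_DENOM;
	do_div(temp64, parent_rate);		/* divff = remainder * DENOM / parent */
	divff = temp64;
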
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index 99c2508de8e5..fb6edf1b8aa2 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -169,6 +169,8 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
169 return -ENODEV; 169 return -ENODEV;
170 170
171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
172 if (!res)
173 return -EINVAL;
172 base = devm_ioremap(dev, res->start, resource_size(res)); 174 base = devm_ioremap(dev, res->start, resource_size(res));
173 if (!base) 175 if (!base)
174 return -ENOMEM; 176 return -ENOMEM;
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 61fefc046ec5..d083b860f083 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -53,7 +53,6 @@
53#define APMU_DISP1 0x110 53#define APMU_DISP1 0x110
54#define APMU_CCIC0 0x50 54#define APMU_CCIC0 0x50
55#define APMU_CCIC1 0xf4 55#define APMU_CCIC1 0xf4
56#define APMU_SP 0x68
57#define MPMU_UART_PLL 0x14 56#define MPMU_UART_PLL 0x14
58 57
59struct mmp2_clk_unit { 58struct mmp2_clk_unit {
@@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = {
210 .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32), 209 .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
211}; 210};
212 211
213static DEFINE_SPINLOCK(sp_lock);
214
215static struct mmp_param_mux_clk apmu_mux_clks[] = { 212static struct mmp_param_mux_clk apmu_mux_clks[] = {
216 {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock}, 213 {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
217 {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, 214 {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
@@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
242 {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, 239 {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
243 {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, 240 {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
244 {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, 241 {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
245 {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock},
246}; 242};
247 243
248static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) 244static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1b1ba54e33dd..1c04575c118f 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -215,6 +215,7 @@ config MSM_MMCC_8996
215 215
216config MSM_GCC_8998 216config MSM_GCC_8998
217 tristate "MSM8998 Global Clock Controller" 217 tristate "MSM8998 Global Clock Controller"
218 select QCOM_GDSC
218 help 219 help
219 Support for the global clock controller on msm8998 devices. 220 Support for the global clock controller on msm8998 devices.
220 Say Y if you want to use peripheral devices such as UART, SPI, 221 Say Y if you want to use peripheral devices such as UART, SPI,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index c782e62dd98b..58fa5c247af1 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = {
115 "core_bi_pll_test_se", 115 "core_bi_pll_test_se",
116}; 116};
117 117
118static const char * const gcc_parent_names_7[] = { 118static const char * const gcc_parent_names_7_ao[] = {
119 "bi_tcxo", 119 "bi_tcxo_ao",
120 "gpll0", 120 "gpll0",
121 "gpll0_out_even", 121 "gpll0_out_even",
122 "core_bi_pll_test_se", 122 "core_bi_pll_test_se",
@@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = {
128 "core_bi_pll_test_se", 128 "core_bi_pll_test_se",
129}; 129};
130 130
131static const char * const gcc_parent_names_8_ao[] = {
132 "bi_tcxo_ao",
133 "gpll0",
134 "core_bi_pll_test_se",
135};
136
131static const struct parent_map gcc_parent_map_10[] = { 137static const struct parent_map gcc_parent_map_10[] = {
132 { P_BI_TCXO, 0 }, 138 { P_BI_TCXO, 0 },
133 { P_GPLL0_OUT_MAIN, 1 }, 139 { P_GPLL0_OUT_MAIN, 1 },
@@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
210 .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, 216 .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
211 .clkr.hw.init = &(struct clk_init_data){ 217 .clkr.hw.init = &(struct clk_init_data){
212 .name = "gcc_cpuss_ahb_clk_src", 218 .name = "gcc_cpuss_ahb_clk_src",
213 .parent_names = gcc_parent_names_7, 219 .parent_names = gcc_parent_names_7_ao,
214 .num_parents = 4, 220 .num_parents = 4,
215 .ops = &clk_rcg2_ops, 221 .ops = &clk_rcg2_ops,
216 }, 222 },
@@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
229 .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, 235 .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
230 .clkr.hw.init = &(struct clk_init_data){ 236 .clkr.hw.init = &(struct clk_init_data){
231 .name = "gcc_cpuss_rbcpr_clk_src", 237 .name = "gcc_cpuss_rbcpr_clk_src",
232 .parent_names = gcc_parent_names_8, 238 .parent_names = gcc_parent_names_8_ao,
233 .num_parents = 3, 239 .num_parents = 3,
234 .ops = &clk_rcg2_ops, 240 .ops = &clk_rcg2_ops,
235 }, 241 },
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 2d5d8b43727e..c4d0b6f6abf2 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
43 /* Read mdiv and fdiv from the fdbck register */ 43 /* Read mdiv and fdiv from the fdbck register */
44 reg = readl(socfpgaclk->hw.reg + 0x4); 44 reg = readl(socfpgaclk->hw.reg + 0x4);
45 mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT; 45 mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
46 vco_freq = (unsigned long long)parent_rate * (mdiv + 6); 46 vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
47 47
48 return (unsigned long)vco_freq; 48 return (unsigned long)vco_freq;
49} 49}
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 5b238fc314ac..8281dfbf38c2 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -12,17 +12,17 @@
12 12
13#include "stratix10-clk.h" 13#include "stratix10-clk.h"
14 14
15static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk", 15static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
16 "f2s_free_clk",}; 16 "f2s-free-clk",};
17static const char * const cntr_mux[] = { "main_pll", "periph_pll", 17static const char * const cntr_mux[] = { "main_pll", "periph_pll",
18 "osc1", "cb_intosc_hs_div2_clk", 18 "osc1", "cb-intosc-hs-div2-clk",
19 "f2s_free_clk"}; 19 "f2s-free-clk"};
20static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",}; 20static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
21 21
22static const char * const noc_free_mux[] = {"main_noc_base_clk", 22static const char * const noc_free_mux[] = {"main_noc_base_clk",
23 "peri_noc_base_clk", 23 "peri_noc_base_clk",
24 "osc1", "cb_intosc_hs_div2_clk", 24 "osc1", "cb-intosc-hs-div2-clk",
25 "f2s_free_clk"}; 25 "f2s-free-clk"};
26 26
27static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; 27static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
28static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; 28static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
@@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
33static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; 33static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
34static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; 34static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
35 35
36static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"}; 36static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
37static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; 37static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
38static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; 38static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
39 39
40static const char * const mpu_free_mux[] = {"main_mpu_base_clk", 40static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
41 "peri_mpu_base_clk", 41 "peri_mpu_base_clk",
42 "osc1", "cb_intosc_hs_div2_clk", 42 "osc1", "cb-intosc-hs-div2-clk",
43 "f2s_free_clk"}; 43 "f2s-free-clk"};
44 44
45/* clocks in AO (always on) controller */ 45/* clocks in AO (always on) controller */
46static const struct stratix10_pll_clock s10_pll_clks[] = { 46static const struct stratix10_pll_clock s10_pll_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 269d3595758b..edc31bb56674 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -133,9 +133,11 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
133 struct tegra_dfll_soc_data *soc; 133 struct tegra_dfll_soc_data *soc;
134 134
135 soc = tegra_dfll_unregister(pdev); 135 soc = tegra_dfll_unregister(pdev);
136 if (IS_ERR(soc)) 136 if (IS_ERR(soc)) {
137 dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n", 137 dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
138 PTR_ERR(soc)); 138 PTR_ERR(soc));
139 return PTR_ERR(soc);
140 }
139 141
140 tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq); 142 tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
141 143
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 8d77090ad94a..0241450f3eb3 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
403 num_dividers = i; 403 num_dividers = i;
404 404
405 tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); 405 tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
406 if (!tmp) 406 if (!tmp) {
407 *table = ERR_PTR(-ENOMEM);
407 return -ENOMEM; 408 return -ENOMEM;
409 }
408 410
409 valid_div = 0; 411 valid_div = 0;
410 *width = 0; 412 *width = 0;
@@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
439{ 441{
440 struct clk_omap_divider *div; 442 struct clk_omap_divider *div;
441 struct clk_omap_reg *reg; 443 struct clk_omap_reg *reg;
444 int ret;
442 445
443 if (!setup) 446 if (!setup)
444 return NULL; 447 return NULL;
@@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
458 div->flags |= CLK_DIVIDER_POWER_OF_TWO; 461 div->flags |= CLK_DIVIDER_POWER_OF_TWO;
459 462
460 div->table = _get_div_table_from_setup(setup, &div->width); 463 div->table = _get_div_table_from_setup(setup, &div->width);
464 if (IS_ERR(div->table)) {
465 ret = PTR_ERR(div->table);
466 kfree(div);
467 return ERR_PTR(ret);
468 }
469
461 470
462 div->shift = setup->bit_shift; 471 div->shift = setup->bit_shift;
463 div->latch = -EINVAL; 472 div->latch = -EINVAL;
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index f65cc0ff76ab..b0908ec62f73 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np)
669 if (ret) 669 if (ret)
670 return ret; 670 return ret;
671 671
672 zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) * 672 zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx),
673 clock_max_idx, GFP_KERNEL); 673 GFP_KERNEL);
674 if (!zynqmp_data) 674 if (!zynqmp_data)
675 return -ENOMEM; 675 return -ENOMEM;
676 676
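
The old kzalloc() added sizeof(*zynqmp_data) once per entry, scaling by the size of the whole header instead of one hws[] element, and the open-coded multiply had no overflow protection; struct_size() sizes header plus n trailing elements and saturates on overflow. A userspace sketch of the equivalent sizing, minus the kernel's saturation; the layout is illustrative.

/* Sketch: size a header plus n flexible-array elements correctly. */
#include <stdio.h>
#include <stdlib.h>

struct table {
	unsigned int num;
	void *hws[];			/* flexible array member */
};

int main(void)
{
	size_t n = 8;
	/* header + n elements: note sizeof(t->hws[0]), not sizeof(*t),
	 * is what scales with the count */
	struct table *t = malloc(sizeof(*t) + sizeof(t->hws[0]) * n);

	if (!t)
		return 1;
	t->num = (unsigned int)n;
	printf("allocated %zu bytes\n", sizeof(*t) + sizeof(t->hws[0]) * n);
	free(t);
	return 0;
}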
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index b17d153e724f..23a1b27579a5 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
21 local_irq_enable(); 21 local_irq_enable();
22 if (!current_set_polling_and_test()) { 22 if (!current_set_polling_and_test()) {
23 unsigned int loop_count = 0; 23 unsigned int loop_count = 0;
24 u64 limit = TICK_USEC; 24 u64 limit = TICK_NSEC;
25 int i; 25 int i;
26 26
27 for (i = 1; i < drv->state_count; i++) { 27 for (i = 1; i < drv->state_count; i++) {
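
poll_idle() measures elapsed time with local_clock(), which returns nanoseconds, so seeding the limit with TICK_USEC cut the poll window by a factor of 1000. A small sketch of the two constants for an example HZ, following their kernel definitions (TICK_NSEC ~ 1e9/HZ, TICK_USEC = 1e6/HZ); the HZ value is an assumption.

/* Sketch: why a microsecond constant is 1000x too short as an ns limit. */
#include <stdio.h>

int main(void)
{
	unsigned long hz = 250;			/* example CONFIG_HZ */
	unsigned long long tick_nsec = 1000000000ULL / hz;
	unsigned long long tick_usec = 1000000ULL / hz;

	printf("TICK_NSEC = %llu ns (one tick)\n", tick_nsec);
	printf("TICK_USEC misused as ns = %llu ns\n", tick_usec);
	return 0;
}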
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index fe070d75c842..4c97478d44bd 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
537 struct nitrox_device *ndev = cmdq->ndev; 537 struct nitrox_device *ndev = cmdq->ndev;
538 struct nitrox_softreq *sr; 538 struct nitrox_softreq *sr;
539 int req_completed = 0, err = 0, budget; 539 int req_completed = 0, err = 0, budget;
540 completion_t callback;
541 void *cb_arg;
540 542
541 /* check all pending requests */ 543 /* check all pending requests */
542 budget = atomic_read(&cmdq->pending_count); 544 budget = atomic_read(&cmdq->pending_count);
@@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
564 smp_mb__after_atomic(); 566 smp_mb__after_atomic();
565 /* remove from response list */ 567 /* remove from response list */
566 response_list_del(sr, cmdq); 568 response_list_del(sr, cmdq);
567
568 /* ORH error code */ 569 /* ORH error code */
569 err = READ_ONCE(*sr->resp.orh) & 0xff; 570 err = READ_ONCE(*sr->resp.orh) & 0xff;
570 571 callback = sr->callback;
571 if (sr->callback) 572 cb_arg = sr->cb_arg;
572 sr->callback(sr->cb_arg, err);
573 softreq_destroy(sr); 573 softreq_destroy(sr);
574 if (callback)
575 callback(cb_arg, err);
574 576
575 req_completed++; 577 req_completed++;
576 } 578 }
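
sr->callback and sr->cb_arg live inside the request that softreq_destroy() frees, so invoking the completion through the request after teardown would be a use-after-free; the fix snapshots both before destroying the request and calls the copy afterwards. A minimal sketch of the ordering, with stand-in types.

/* Sketch: copy the completion out of the request before freeing it. */
#include <stdio.h>
#include <stdlib.h>

typedef void (*completion_t)(void *arg, int err);

struct softreq {
	completion_t callback;
	void *cb_arg;
};

static void done(void *arg, int err)
{
	printf("%s: err=%d\n", (const char *)arg, err);
}

static void complete_and_destroy(struct softreq *sr, int err)
{
	completion_t callback = sr->callback;	/* snapshot ... */
	void *cb_arg = sr->cb_arg;

	free(sr);				/* ... then destroy ... */
	if (callback)				/* ... then notify safely */
		callback(cb_arg, err);
}

int main(void)
{
	struct softreq *sr = malloc(sizeof(*sr));

	if (!sr)
		return 1;
	sr->callback = done;
	sr->cb_arg = (void *)"req0";
	complete_and_destroy(sr, 0);
	return 0;
}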
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 4213cb0bb2a7..f8664bac9fa8 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -295,8 +295,8 @@ struct altr_sdram_mc_data {
295#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 295#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
296 296
297/* Sticky registers for Uncorrected Errors */ 297/* Sticky registers for Uncorrected Errors */
298#define S10_SYSMGR_UE_VAL_OFST 0x120 298#define S10_SYSMGR_UE_VAL_OFST 0x220
299#define S10_SYSMGR_UE_ADDR_OFST 0x124 299#define S10_SYSMGR_UE_ADDR_OFST 0x224
300 300
301#define S10_DDR0_IRQ_MASK BIT(16) 301#define S10_DDR0_IRQ_MASK BIT(16)
302 302
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 09b845e90114..a785ffd5af89 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1144,10 +1144,6 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
1144 if (device->is_local) 1144 if (device->is_local)
1145 return -ENODEV; 1145 return -ENODEV;
1146 1146
1147 if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
1148 WARN_ON(dma_set_max_seg_size(device->card->device,
1149 SBP2_MAX_SEG_SIZE));
1150
1151 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); 1147 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
1152 if (shost == NULL) 1148 if (shost == NULL)
1153 return -ENOMEM; 1149 return -ENOMEM;
@@ -1610,6 +1606,7 @@ static struct scsi_host_template scsi_driver_template = {
1610 .eh_abort_handler = sbp2_scsi_abort, 1606 .eh_abort_handler = sbp2_scsi_abort,
1611 .this_id = -1, 1607 .this_id = -1,
1612 .sg_tablesize = SG_ALL, 1608 .sg_tablesize = SG_ALL,
1609 .max_segment_size = SBP2_MAX_SEG_SIZE,
1613 .can_queue = 1, 1610 .can_queue = 1,
1614 .sdev_attrs = sbp2_scsi_sysfs_attrs, 1611 .sdev_attrs = sbp2_scsi_sysfs_attrs,
1615}; 1612};
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 23ea1ed409d1..352bd2473162 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -37,8 +37,9 @@ extern u64 efi_system_table;
37static struct ptdump_info efi_ptdump_info = { 37static struct ptdump_info efi_ptdump_info = {
38 .mm = &efi_mm, 38 .mm = &efi_mm,
39 .markers = (struct addr_marker[]){ 39 .markers = (struct addr_marker[]){
40 { 0, "UEFI runtime start" }, 40 { 0, "UEFI runtime start" },
41 { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" } 41 { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" },
42 { -1, NULL }
42 }, 43 },
43 .base_addr = 0, 44 .base_addr = 0,
44}; 45};
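
ptdump walks the marker array until it hits a terminator, so the array needs the closing { -1, NULL } entry or the walk runs past the end. A sketch of that walk; the struct mirrors addr_marker, and the end address used here is an assumption for illustration.

/* Sketch: sentinel-terminated marker walk of the kind ptdump performs. */
#include <stdio.h>

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

int main(void)
{
	struct addr_marker markers[] = {
		{ 0, "UEFI runtime start" },
		{ 0x0000800000000000UL, "UEFI runtime end" },	/* assumed window */
		{ (unsigned long)-1, NULL }	/* the added terminator */
	};
	struct addr_marker *m;

	for (m = markers; m->name; m++)		/* stops at the sentinel */
		printf("%016lx %s\n", m->start_address, m->name);
	return 0;
}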
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 6b11f1314248..7f9e0304b510 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
66static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, 66static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
67 unsigned int nr, int value) 67 unsigned int nr, int value)
68{ 68{
69 if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) 69 if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
70 altr_a10sr_gpio_set(gc, nr, value);
70 return 0; 71 return 0;
72 }
71 return -EINVAL; 73 return -EINVAL;
72} 74}
73 75
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e0d6a0a7bc69..e41223c05f6e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
180 180
181static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) 181static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
182{ 182{
183 return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); 183 struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
184
185 switch (sprd_eic->type) {
186 case SPRD_EIC_DEBOUNCE:
187 return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
188 case SPRD_EIC_ASYNC:
189 return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
190 case SPRD_EIC_SYNC:
191 return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
192 default:
193 return -ENOTSUPP;
194 }
184} 195}
185 196
186static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) 197static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
368 irq_set_handler_locked(data, handle_edge_irq); 379 irq_set_handler_locked(data, handle_edge_irq);
369 break; 380 break;
370 case IRQ_TYPE_EDGE_BOTH: 381 case IRQ_TYPE_EDGE_BOTH:
382 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
371 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); 383 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
372 irq_set_handler_locked(data, handle_edge_irq); 384 irq_set_handler_locked(data, handle_edge_irq);
373 break; 385 break;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index adf72dda25a2..68a35b65925a 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
84 */ 84 */
85struct pcf857x { 85struct pcf857x {
86 struct gpio_chip chip; 86 struct gpio_chip chip;
87 struct irq_chip irqchip;
87 struct i2c_client *client; 88 struct i2c_client *client;
88 struct mutex lock; /* protect 'out' */ 89 struct mutex lock; /* protect 'out' */
89 unsigned out; /* software latch */ 90 unsigned out; /* software latch */
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
252 mutex_unlock(&gpio->lock); 253 mutex_unlock(&gpio->lock);
253} 254}
254 255
255static struct irq_chip pcf857x_irq_chip = {
256 .name = "pcf857x",
257 .irq_enable = pcf857x_irq_enable,
258 .irq_disable = pcf857x_irq_disable,
259 .irq_ack = noop,
260 .irq_mask = noop,
261 .irq_unmask = noop,
262 .irq_set_wake = pcf857x_irq_set_wake,
263 .irq_bus_lock = pcf857x_irq_bus_lock,
264 .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
265};
266
267/*-------------------------------------------------------------------------*/ 256/*-------------------------------------------------------------------------*/
268 257
269static int pcf857x_probe(struct i2c_client *client, 258static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
376 365
377 /* Enable irqchip if we have an interrupt */ 366 /* Enable irqchip if we have an interrupt */
378 if (client->irq) { 367 if (client->irq) {
368 gpio->irqchip.name = "pcf857x",
369 gpio->irqchip.irq_enable = pcf857x_irq_enable,
370 gpio->irqchip.irq_disable = pcf857x_irq_disable,
371 gpio->irqchip.irq_ack = noop,
372 gpio->irqchip.irq_mask = noop,
373 gpio->irqchip.irq_unmask = noop,
374 gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
375 gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
376 gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
379 status = gpiochip_irqchip_add_nested(&gpio->chip, 377 status = gpiochip_irqchip_add_nested(&gpio->chip,
380 &pcf857x_irq_chip, 378 &gpio->irqchip,
381 0, handle_level_irq, 379 0, handle_level_irq,
382 IRQ_TYPE_NONE); 380 IRQ_TYPE_NONE);
383 if (status) { 381 if (status) {
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
392 if (status) 390 if (status)
393 goto fail; 391 goto fail;
394 392
395 gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, 393 gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
396 client->irq); 394 client->irq);
397 gpio->irq_parent = client->irq; 395 gpio->irq_parent = client->irq;
398 } 396 }
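
A single static irq_chip shared across every pcf857x means whatever the irq core writes into it is seen by all devices at once; embedding the chip in struct pcf857x gives each instance its own copy to pass to gpiochip_irqchip_add_nested(). A toy illustration of shared versus embedded state; the types and fields are stand-ins, not the irqchip API.

/* Sketch: per-instance ops instead of one shared static copy. */
#include <stdio.h>

struct irq_ops {
	const char *name;
	int wake_enabled;	/* state the core may write per device */
};

struct chip {
	struct irq_ops irq;	/* embedded: one private copy each */
};

int main(void)
{
	struct chip a = { { "pcf857x-a", 0 } };
	struct chip b = { { "pcf857x-b", 0 } };

	a.irq.wake_enabled = 1;	/* does not leak into b, unlike a shared static */
	printf("%s=%d %s=%d\n", a.irq.name, a.irq.wake_enabled,
	       b.irq.name, b.irq.wake_enabled);
	return 0;
}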
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 1b79ebcfce3e..541fa6ac399d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
253 struct vf610_gpio_port *port; 253 struct vf610_gpio_port *port;
254 struct resource *iores; 254 struct resource *iores;
255 struct gpio_chip *gc; 255 struct gpio_chip *gc;
256 int i;
256 int ret; 257 int ret;
257 258
258 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); 259 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
319 if (ret < 0) 320 if (ret < 0)
320 return ret; 321 return ret;
321 322
323 /* Mask all GPIO interrupts */
324 for (i = 0; i < gc->ngpio; i++)
325 vf610_gpio_writel(0, port->base + PORT_PCR(i));
326
322 /* Clear the interrupt status register for all GPIO's */ 327 /* Clear the interrupt status register for all GPIO's */
323 vf610_gpio_writel(~0, port->base + PORT_ISFR); 328 vf610_gpio_writel(~0, port->base + PORT_ISFR);
324 329
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 1651d7f0a303..d1adfdf50fb3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
828 /* Do not leak kernel stack to userspace */ 828 /* Do not leak kernel stack to userspace */
829 memset(&ge, 0, sizeof(ge)); 829 memset(&ge, 0, sizeof(ge));
830 830
831 ge.timestamp = le->timestamp; 831 /*
832 * We may be running from a nested threaded interrupt in which case
833 * we didn't get the timestamp from lineevent_irq_handler().
834 */
835 if (!le->timestamp)
836 ge.timestamp = ktime_get_real_ns();
837 else
838 ge.timestamp = le->timestamp;
832 839
833 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE 840 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
834 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { 841 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
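
When the line event fires as a nested threaded interrupt, lineevent_irq_handler() never ran in hard-irq context and le->timestamp is still zero, so the thread stamps the event itself. A userspace sketch of the fallback, with clock_gettime() standing in for ktime_get_real_ns().

/* Sketch: prefer the pre-captured hard-irq timestamp, else stamp now. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	uint64_t hardirq_stamp = 0;	/* 0: handler never ran (nested threaded irq) */
	uint64_t ts = hardirq_stamp ? hardirq_stamp : now_ns();

	printf("event timestamp: %llu ns\n", (unsigned long long)ts);
	return 0;
}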
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a028661d9e20..92b11de19581 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
576 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, 576 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, 577 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, 578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
579 { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
579 { 0, 0, 0, 0, 0 }, 580 { 0, 0, 0, 0, 0 },
580}; 581};
581 582
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index b8747a5c9204..99d596dc0e89 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -32,6 +32,7 @@
32#include "vega10_pptable.h" 32#include "vega10_pptable.h"
33 33
34#define NUM_DSPCLK_LEVELS 8 34#define NUM_DSPCLK_LEVELS 8
35#define VEGA10_ENGINECLOCK_HARDMAX 198000
35 36
36static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, 37static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
37 enum phm_platform_caps cap) 38 enum phm_platform_caps cap)
@@ -258,7 +259,26 @@ static int init_over_drive_limits(
258 struct pp_hwmgr *hwmgr, 259 struct pp_hwmgr *hwmgr,
259 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) 260 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
260{ 261{
261 hwmgr->platform_descriptor.overdriveLimit.engineClock = 262 const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
263 (const ATOM_Vega10_GFXCLK_Dependency_Table *)
264 (((unsigned long) powerplay_table) +
265 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
266 bool is_acg_enabled = false;
267 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
268
269 if (gfxclk_dep_table->ucRevId == 1) {
270 patom_record_v2 =
271 (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
272 is_acg_enabled =
273 (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
274 }
275
276 if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
277 !is_acg_enabled)
278 hwmgr->platform_descriptor.overdriveLimit.engineClock =
279 VEGA10_ENGINECLOCK_HARDMAX;
280 else
281 hwmgr->platform_descriptor.overdriveLimit.engineClock =
262 le32_to_cpu(powerplay_table->ulMaxODEngineClock); 282 le32_to_cpu(powerplay_table->ulMaxODEngineClock);
263 hwmgr->platform_descriptor.overdriveLimit.memoryClock = 283 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
264 le32_to_cpu(powerplay_table->ulMaxODMemoryClock); 284 le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
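
init_over_drive_limits() now caps the overdrive engine clock at a hard maximum unless the last gfxclk dependency record reports ACG enabled, in effect a conditional clamp on the table value. A sketch of that decision; the helper and test values are illustrative, with the constant mirroring the patch's 10 kHz units.

/* Sketch: clamp a table-provided limit unless a feature flag lifts it. */
#include <stdio.h>

#define ENGINECLOCK_HARDMAX 198000	/* 1980 MHz in 10 kHz units */

static unsigned int od_engine_limit(unsigned int table_max, int acg_enabled)
{
	if (table_max > ENGINECLOCK_HARDMAX && !acg_enabled)
		return ENGINECLOCK_HARDMAX;
	return table_max;
}

int main(void)
{
	printf("%u\n", od_engine_limit(250000, 0));	/* clamped to 198000 */
	printf("%u\n", od_engine_limit(250000, 1));	/* ACG set: table value kept */
	return 0;
}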
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 5567ddc7760f..55bb7885e228 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
332 332
333 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); 333 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
334 i915_gem_object_put(wa_ctx->indirect_ctx.obj); 334 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
335
336 wa_ctx->indirect_ctx.obj = NULL;
337 wa_ctx->indirect_ctx.shadow_va = NULL;
335} 338}
336 339
337static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, 340static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -911,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
911 914
912 list_del_init(&workload->list); 915 list_del_init(&workload->list);
913 916
914 if (!workload->status) {
915 release_shadow_batch_buffer(workload);
916 release_shadow_wa_ctx(&workload->wa_ctx);
917 }
918
919 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { 917 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
920 /* if workload->status is not successful means HW GPU 918 /* if workload->status is not successful means HW GPU
921 * has occurred GPU hang or something wrong with i915/GVT, 919 * has occurred GPU hang or something wrong with i915/GVT,
@@ -1283,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1283{ 1281{
1284 struct intel_vgpu_submission *s = &workload->vgpu->submission; 1282 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1285 1283
1284 release_shadow_batch_buffer(workload);
1285 release_shadow_wa_ctx(&workload->wa_ctx);
1286
1286 if (workload->shadow_mm) 1287 if (workload->shadow_mm)
1287 intel_vgpu_mm_put(workload->shadow_mm); 1288 intel_vgpu_mm_put(workload->shadow_mm);
1288 1289
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4796f40a6d4f..eab9341a5152 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
303 */ 303 */
304 if (!(prio & I915_PRIORITY_NEWCLIENT)) { 304 if (!(prio & I915_PRIORITY_NEWCLIENT)) {
305 prio |= I915_PRIORITY_NEWCLIENT; 305 prio |= I915_PRIORITY_NEWCLIENT;
306 active->sched.attr.priority = prio;
306 list_move_tail(&active->sched.link, 307 list_move_tail(&active->sched.link,
307 i915_sched_lookup_priolist(engine, prio)); 308 i915_sched_lookup_priolist(engine, prio));
308 } 309 }
@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
645 int i; 646 int i;
646 647
647 priolist_for_each_request_consume(rq, rn, p, i) { 648 priolist_for_each_request_consume(rq, rn, p, i) {
649 GEM_BUG_ON(last &&
650 need_preempt(engine, last, rq_prio(rq)));
651
648 /* 652 /*
649 * Can we combine this request with the current port? 653 * Can we combine this request with the current port?
650 * It has to be the same context/ringbuffer and not 654 * It has to be the same context/ringbuffer and not
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 5beb83d1cf87..ce1b3cc4bf6d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -944,7 +944,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
944 np = dev_pm_opp_get_of_node(opp); 944 np = dev_pm_opp_get_of_node(opp);
945 945
946 if (np) { 946 if (np) {
947 of_property_read_u32(np, "qcom,level", &val); 947 of_property_read_u32(np, "opp-level", &val);
948 of_node_put(np); 948 of_node_put(np);
949 } 949 }
950 950
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2e4372ef17a3..2cfee1a4fe0b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -765,7 +765,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
765 adreno_gpu->rev = config->rev; 765 adreno_gpu->rev = config->rev;
766 766
767 adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; 767 adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
768 adreno_gpu_config.irqname = "kgsl_3d0_irq";
769 768
770 adreno_gpu_config.va_start = SZ_16M; 769 adreno_gpu_config.va_start = SZ_16M;
771 adreno_gpu_config.va_end = 0xffffffff; 770 adreno_gpu_config.va_end = 0xffffffff;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index fd75870eb17f..6aefcd6db46b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
365 &pdpu->pipe_qos_cfg); 365 &pdpu->pipe_qos_cfg);
366} 366}
367 367
368static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
369{
370 struct dpu_plane *pdpu = to_dpu_plane(plane);
371 struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
372
373 if (!pdpu->is_rt_pipe)
374 return;
375
376 pm_runtime_get_sync(&dpu_kms->pdev->dev);
377 _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
378 pm_runtime_put_sync(&dpu_kms->pdev->dev);
379}
380
381/** 368/**
382 * _dpu_plane_set_ot_limit - set OT limit for the given plane 369 * _dpu_plane_set_ot_limit - set OT limit for the given plane
383 * @plane: Pointer to drm plane 370 * @plane: Pointer to drm plane
@@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane)
1248} 1235}
1249 1236
1250#ifdef CONFIG_DEBUG_FS 1237#ifdef CONFIG_DEBUG_FS
1238static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
1239{
1240 struct dpu_plane *pdpu = to_dpu_plane(plane);
1241 struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
1242
1243 if (!pdpu->is_rt_pipe)
1244 return;
1245
1246 pm_runtime_get_sync(&dpu_kms->pdev->dev);
1247 _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
1248 pm_runtime_put_sync(&dpu_kms->pdev->dev);
1249}
1250
1251static ssize_t _dpu_plane_danger_read(struct file *file, 1251static ssize_t _dpu_plane_danger_read(struct file *file,
1252 char __user *buff, size_t count, loff_t *ppos) 1252 char __user *buff, size_t count, loff_t *ppos)
1253{ 1253{
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9cd6a96c6bf2..927e5d86f7c1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
250void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, 250void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
251 struct msm_gem_vma *vma); 251 struct msm_gem_vma *vma);
252int msm_gem_map_vma(struct msm_gem_address_space *aspace, 252int msm_gem_map_vma(struct msm_gem_address_space *aspace,
253 struct msm_gem_vma *vma, struct sg_table *sgt, int npages); 253 struct msm_gem_vma *vma, int prot,
254 struct sg_table *sgt, int npages);
254void msm_gem_close_vma(struct msm_gem_address_space *aspace, 255void msm_gem_close_vma(struct msm_gem_address_space *aspace,
255 struct msm_gem_vma *vma); 256 struct msm_gem_vma *vma);
256 257
@@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
333struct drm_gem_object *msm_gem_import(struct drm_device *dev, 334struct drm_gem_object *msm_gem_import(struct drm_device *dev,
334 struct dma_buf *dmabuf, struct sg_table *sgt); 335 struct dma_buf *dmabuf, struct sg_table *sgt);
335 336
337__printf(2, 3)
336void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); 338void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
337 339
338int msm_framebuffer_prepare(struct drm_framebuffer *fb, 340int msm_framebuffer_prepare(struct drm_framebuffer *fb,
@@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
396int msm_debugfs_late_init(struct drm_device *dev); 398int msm_debugfs_late_init(struct drm_device *dev);
397int msm_rd_debugfs_init(struct drm_minor *minor); 399int msm_rd_debugfs_init(struct drm_minor *minor);
398void msm_rd_debugfs_cleanup(struct msm_drm_private *priv); 400void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
401__printf(3, 4)
399void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, 402void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
400 const char *fmt, ...); 403 const char *fmt, ...);
401int msm_perf_debugfs_init(struct drm_minor *minor); 404int msm_perf_debugfs_init(struct drm_minor *minor);
402void msm_perf_debugfs_cleanup(struct msm_drm_private *priv); 405void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
403#else 406#else
404static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } 407static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
408__printf(3, 4)
405static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, 409static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
406 const char *fmt, ...) {} 410 const char *fmt, ...) {}
407static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {} 411static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 51a95da694d8..c8886d3071fa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
391 struct msm_gem_object *msm_obj = to_msm_bo(obj); 391 struct msm_gem_object *msm_obj = to_msm_bo(obj);
392 struct msm_gem_vma *vma; 392 struct msm_gem_vma *vma;
393 struct page **pages; 393 struct page **pages;
394 int prot = IOMMU_READ;
395
396 if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
397 prot |= IOMMU_WRITE;
394 398
395 WARN_ON(!mutex_is_locked(&msm_obj->lock)); 399 WARN_ON(!mutex_is_locked(&msm_obj->lock));
396 400
@@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
405 if (IS_ERR(pages)) 409 if (IS_ERR(pages))
406 return PTR_ERR(pages); 410 return PTR_ERR(pages);
407 411
408 return msm_gem_map_vma(aspace, vma, msm_obj->sgt, 412 return msm_gem_map_vma(aspace, vma, prot,
409 obj->size >> PAGE_SHIFT); 413 msm_obj->sgt, obj->size >> PAGE_SHIFT);
410} 414}
411 415
412/* get iova and pin it. Should have a matching put */ 416/* get iova and pin it. Should have a matching put */
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 557360788084..49c04829cf34 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
68 68
69int 69int
70msm_gem_map_vma(struct msm_gem_address_space *aspace, 70msm_gem_map_vma(struct msm_gem_address_space *aspace,
71 struct msm_gem_vma *vma, struct sg_table *sgt, int npages) 71 struct msm_gem_vma *vma, int prot,
72 struct sg_table *sgt, int npages)
72{ 73{
73 unsigned size = npages << PAGE_SHIFT; 74 unsigned size = npages << PAGE_SHIFT;
74 int ret = 0; 75 int ret = 0;
@@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
86 87
87 if (aspace->mmu) 88 if (aspace->mmu)
88 ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, 89 ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
89 size, IOMMU_READ | IOMMU_WRITE); 90 size, prot);
90 91
91 if (ret) 92 if (ret)
92 vma->mapped = false; 93 vma->mapped = false;
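
Mapping every buffer IOMMU_READ | IOMMU_WRITE ignored MSM_BO_GPU_READONLY; msm_gem_pin_iova() now derives the prot bits from the BO flags and msm_gem_map_vma() passes them through to the IOMMU map call. A sketch of the derivation; the bit values here are invented stand-ins for the kernel's flags.

/* Sketch: derive mapping protection from buffer-object flags. */
#include <stdio.h>

#define BO_GPU_READONLY (1 << 0)	/* illustrative values */
#define MAP_R (1 << 0)
#define MAP_W (1 << 1)

static int bo_prot(unsigned int flags)
{
	int prot = MAP_R;

	if (!(flags & BO_GPU_READONLY))
		prot |= MAP_W;
	return prot;
}

int main(void)
{
	printf("rw buffer prot=%#x\n", bo_prot(0));
	printf("ro buffer prot=%#x\n", bo_prot(BO_GPU_READONLY));
	return 0;
}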
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5f3eff304355..10babd18e286 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
900 } 900 }
901 901
902 /* Get Interrupt: */ 902 /* Get Interrupt: */
903 gpu->irq = platform_get_irq_byname(pdev, config->irqname); 903 gpu->irq = platform_get_irq(pdev, 0);
904 if (gpu->irq < 0) { 904 if (gpu->irq < 0) {
905 ret = gpu->irq; 905 ret = gpu->irq;
906 DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret); 906 DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index efb49bb64191..ca17086f72c9 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -31,7 +31,6 @@ struct msm_gpu_state;
31 31
32struct msm_gpu_config { 32struct msm_gpu_config {
33 const char *ioname; 33 const char *ioname;
34 const char *irqname;
35 uint64_t va_start; 34 uint64_t va_start;
36 uint64_t va_end; 35 uint64_t va_end;
37 unsigned int nr_rings; 36 unsigned int nr_rings;
@@ -63,7 +62,7 @@ struct msm_gpu_funcs {
63 struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); 62 struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
64 void (*recover)(struct msm_gpu *gpu); 63 void (*recover)(struct msm_gpu *gpu);
65 void (*destroy)(struct msm_gpu *gpu); 64 void (*destroy)(struct msm_gpu *gpu);
66#ifdef CONFIG_DEBUG_FS 65#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
67 /* show GPU status in debugfs: */ 66 /* show GPU status in debugfs: */
68 void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state, 67 void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
69 struct drm_printer *p); 68 struct drm_printer *p);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 90e9d0a48dc0..d21172933d92 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
115 char *fptr = &fifo->buf[fifo->head]; 115 char *fptr = &fifo->buf[fifo->head];
116 int n; 116 int n;
117 117
118 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); 118 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
119 if (!rd->open)
120 return;
119 121
120 /* Note that smp_load_acquire() is not strictly required 122 /* Note that smp_load_acquire() is not strictly required
121 * as CIRC_SPACE_TO_END() does not access the tail more 123 * as CIRC_SPACE_TO_END() does not access the tail more
@@ -213,7 +215,10 @@ out:
213static int rd_release(struct inode *inode, struct file *file) 215static int rd_release(struct inode *inode, struct file *file)
214{ 216{
215 struct msm_rd_state *rd = inode->i_private; 217 struct msm_rd_state *rd = inode->i_private;
218
216 rd->open = false; 219 rd->open = false;
220 wake_up_all(&rd->fifo_event);
221
217 return 0; 222 return 0;
218} 223}
219 224
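
A writer blocked in wait_event() for fifo space could sleep forever once the reader closed the debugfs file; the wait condition gains || !rd->open and rd_release() now wakes all waiters. The same shutdown-aware wait in a pthread sketch; names are stand-ins.

/* Sketch: wait for space OR shutdown, and broadcast on close. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t event = PTHREAD_COND_INITIALIZER;
static int space_avail, reader_open = 1;

static void *writer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!space_avail && reader_open)	/* the added "|| !rd->open" */
		pthread_cond_wait(&event, &lock);
	printf(reader_open ? "got space\n" : "reader gone, bailing\n");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);
	pthread_mutex_lock(&lock);
	reader_open = 0;		/* rd_release() */
	pthread_cond_broadcast(&event);	/* wake_up_all() */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}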
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 061d2e0d9011..416da5376701 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
92 val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); 92 val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
93 val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; 93 val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
94 writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); 94 writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
95
96 clk_disable_unprepare(hdmi->tmds_clk);
95} 97}
96 98
97static void sun4i_hdmi_enable(struct drm_encoder *encoder) 99static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder)
102 104
103 DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); 105 DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
104 106
107 clk_prepare_enable(hdmi->tmds_clk);
108
105 sun4i_hdmi_setup_avi_infoframes(hdmi, mode); 109 sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
106 val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); 110 val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
107 val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); 111 val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f41d5fe51abe..9993b692598f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -125,6 +125,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
125{ 125{
126 struct hid_collection *collection; 126 struct hid_collection *collection;
127 unsigned usage; 127 unsigned usage;
128 int collection_index;
128 129
129 usage = parser->local.usage[0]; 130 usage = parser->local.usage[0];
130 131
@@ -167,13 +168,13 @@ static int open_collection(struct hid_parser *parser, unsigned type)
167 parser->collection_stack[parser->collection_stack_ptr++] = 168 parser->collection_stack[parser->collection_stack_ptr++] =
168 parser->device->maxcollection; 169 parser->device->maxcollection;
169 170
170 collection = parser->device->collection + 171 collection_index = parser->device->maxcollection++;
171 parser->device->maxcollection++; 172 collection = parser->device->collection + collection_index;
172 collection->type = type; 173 collection->type = type;
173 collection->usage = usage; 174 collection->usage = usage;
174 collection->level = parser->collection_stack_ptr - 1; 175 collection->level = parser->collection_stack_ptr - 1;
175 collection->parent = parser->active_collection; 176 collection->parent_idx = (collection->level == 0) ? -1 :
176 parser->active_collection = collection; 177 parser->collection_stack[collection->level - 1];
177 178
178 if (type == HID_COLLECTION_APPLICATION) 179 if (type == HID_COLLECTION_APPLICATION)
179 parser->device->maxapplication++; 180 parser->device->maxapplication++;
@@ -192,8 +193,6 @@ static int close_collection(struct hid_parser *parser)
192 return -EINVAL; 193 return -EINVAL;
193 } 194 }
194 parser->collection_stack_ptr--; 195 parser->collection_stack_ptr--;
195 if (parser->active_collection)
196 parser->active_collection = parser->active_collection->parent;
197 return 0; 196 return 0;
198} 197}
199 198
@@ -1006,10 +1005,12 @@ static void hid_apply_multiplier_to_field(struct hid_device *hid,
1006 usage = &field->usage[i]; 1005 usage = &field->usage[i];
1007 1006
1008 collection = &hid->collection[usage->collection_index]; 1007 collection = &hid->collection[usage->collection_index];
1009 while (collection && collection != multiplier_collection) 1008 while (collection->parent_idx != -1 &&
1010 collection = collection->parent; 1009 collection != multiplier_collection)
1010 collection = &hid->collection[collection->parent_idx];
1011 1011
1012 if (collection || multiplier_collection == NULL) 1012 if (collection->parent_idx != -1 ||
1013 multiplier_collection == NULL)
1013 usage->resolution_multiplier = effective_multiplier; 1014 usage->resolution_multiplier = effective_multiplier;
1014 1015
1015 } 1016 }
@@ -1044,9 +1045,9 @@ static void hid_apply_multiplier(struct hid_device *hid,
1044 * applicable fields later. 1045 * applicable fields later.
1045 */ 1046 */
1046 multiplier_collection = &hid->collection[multiplier->usage->collection_index]; 1047 multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1047 while (multiplier_collection && 1048 while (multiplier_collection->parent_idx != -1 &&
1048 multiplier_collection->type != HID_COLLECTION_LOGICAL) 1049 multiplier_collection->type != HID_COLLECTION_LOGICAL)
1049 multiplier_collection = multiplier_collection->parent; 1050 multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1050 1051
1051 effective_multiplier = hid_calculate_multiplier(hid, multiplier); 1052 effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1052 1053
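
Storing parent indices instead of parent pointers keeps walks valid when the collection array is grown with krealloc(), which is what left the old parent pointers dangling; -1 marks a root. A sketch of the index-based walk, with illustrative data.

/* Sketch: walk a tree stored as an array with parent indices. */
#include <stdio.h>

struct collection {
	const char *name;
	int parent_idx;		/* -1 marks a root */
};

int main(void)
{
	struct collection c[] = {
		{ "application", -1 },
		{ "logical", 0 },
		{ "physical", 1 },
	};
	int idx = 2;

	while (idx != -1) {	/* indices stay valid across realloc */
		printf("%s\n", c[idx].name);
		idx = c[idx].parent_idx;
	}
	return 0;
}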
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index c530476edba6..ac9fda1b5a72 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/debugfs.h> 31#include <linux/debugfs.h>
32#include <linux/seq_file.h> 32#include <linux/seq_file.h>
33#include <linux/kfifo.h>
33#include <linux/sched/signal.h> 34#include <linux/sched/signal.h>
34#include <linux/export.h> 35#include <linux/export.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
661/* enqueue string to 'events' ring buffer */ 662/* enqueue string to 'events' ring buffer */
662void hid_debug_event(struct hid_device *hdev, char *buf) 663void hid_debug_event(struct hid_device *hdev, char *buf)
663{ 664{
664 unsigned i;
665 struct hid_debug_list *list; 665 struct hid_debug_list *list;
666 unsigned long flags; 666 unsigned long flags;
667 667
668 spin_lock_irqsave(&hdev->debug_list_lock, flags); 668 spin_lock_irqsave(&hdev->debug_list_lock, flags);
669 list_for_each_entry(list, &hdev->debug_list, node) { 669 list_for_each_entry(list, &hdev->debug_list, node)
670 for (i = 0; buf[i]; i++) 670 kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
671 list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
672 buf[i];
673 list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
674 }
675 spin_unlock_irqrestore(&hdev->debug_list_lock, flags); 671 spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
676 672
677 wake_up_interruptible(&hdev->debug_wait); 673 wake_up_interruptible(&hdev->debug_wait);
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
722 hid_debug_event(hdev, buf); 718 hid_debug_event(hdev, buf);
723 719
724 kfree(buf); 720 kfree(buf);
725 wake_up_interruptible(&hdev->debug_wait); 721 wake_up_interruptible(&hdev->debug_wait);
726
727} 722}
728EXPORT_SYMBOL_GPL(hid_dump_input); 723EXPORT_SYMBOL_GPL(hid_dump_input);
729 724
@@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
1083 goto out; 1078 goto out;
1084 } 1079 }
1085 1080
1086 if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { 1081 err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
1087 err = -ENOMEM; 1082 if (err) {
1088 kfree(list); 1083 kfree(list);
1089 goto out; 1084 goto out;
1090 } 1085 }
@@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
1104 size_t count, loff_t *ppos) 1099 size_t count, loff_t *ppos)
1105{ 1100{
1106 struct hid_debug_list *list = file->private_data; 1101 struct hid_debug_list *list = file->private_data;
1107 int ret = 0, len; 1102 int ret = 0, copied;
1108 DECLARE_WAITQUEUE(wait, current); 1103 DECLARE_WAITQUEUE(wait, current);
1109 1104
1110 mutex_lock(&list->read_mutex); 1105 mutex_lock(&list->read_mutex);
1111 while (ret == 0) { 1106 if (kfifo_is_empty(&list->hid_debug_fifo)) {
1112 if (list->head == list->tail) { 1107 add_wait_queue(&list->hdev->debug_wait, &wait);
1113 add_wait_queue(&list->hdev->debug_wait, &wait); 1108 set_current_state(TASK_INTERRUPTIBLE);
1114 set_current_state(TASK_INTERRUPTIBLE); 1109
1115 1110 while (kfifo_is_empty(&list->hid_debug_fifo)) {
1116 while (list->head == list->tail) { 1111 if (file->f_flags & O_NONBLOCK) {
1117 if (file->f_flags & O_NONBLOCK) { 1112 ret = -EAGAIN;
1118 ret = -EAGAIN; 1113 break;
1119 break; 1114 }
1120 }
1121 if (signal_pending(current)) {
1122 ret = -ERESTARTSYS;
1123 break;
1124 }
1125 1115
1126 if (!list->hdev || !list->hdev->debug) { 1116 if (signal_pending(current)) {
1127 ret = -EIO; 1117 ret = -ERESTARTSYS;
1128 set_current_state(TASK_RUNNING); 1118 break;
1129 goto out; 1119 }
1130 }
1131 1120
1132 /* allow O_NONBLOCK from other threads */ 1121 /* if list->hdev is NULL we cannot remove_wait_queue().
1133 mutex_unlock(&list->read_mutex); 1122 * if list->hdev->debug is 0 then hid_debug_unregister()
1134 schedule(); 1123 * was already called and list->hdev is being destroyed.
1135 mutex_lock(&list->read_mutex); 1124 * if we add remove_wait_queue() here we can hit a race.
1136 set_current_state(TASK_INTERRUPTIBLE); 1125 */
1126 if (!list->hdev || !list->hdev->debug) {
1127 ret = -EIO;
1128 set_current_state(TASK_RUNNING);
1129 goto out;
1137 } 1130 }
1138 1131
1139 set_current_state(TASK_RUNNING); 1132 /* allow O_NONBLOCK from other threads */
1140 remove_wait_queue(&list->hdev->debug_wait, &wait); 1133 mutex_unlock(&list->read_mutex);
1134 schedule();
1135 mutex_lock(&list->read_mutex);
1136 set_current_state(TASK_INTERRUPTIBLE);
1141 } 1137 }
1142 1138
1143 if (ret) 1139 __set_current_state(TASK_RUNNING);
1144 goto out; 1140 remove_wait_queue(&list->hdev->debug_wait, &wait);
1145 1141
1146 /* pass the ringbuffer contents to userspace */ 1142 if (ret)
1147copy_rest:
1148 if (list->tail == list->head)
1149 goto out; 1143 goto out;
1150 if (list->tail > list->head) {
1151 len = list->tail - list->head;
1152 if (len > count)
1153 len = count;
1154
1155 if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
1156 ret = -EFAULT;
1157 goto out;
1158 }
1159 ret += len;
1160 list->head += len;
1161 } else {
1162 len = HID_DEBUG_BUFSIZE - list->head;
1163 if (len > count)
1164 len = count;
1165
1166 if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
1167 ret = -EFAULT;
1168 goto out;
1169 }
1170 list->head = 0;
1171 ret += len;
1172 count -= len;
1173 if (count > 0)
1174 goto copy_rest;
1175 }
1176
1177 } 1144 }
1145
1146 /* pass the fifo content to userspace, locking is not needed with only
1147 * one concurrent reader and one concurrent writer
1148 */
1149 ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
1150 if (ret)
1151 goto out;
1152 ret = copied;
1178out: 1153out:
1179 mutex_unlock(&list->read_mutex); 1154 mutex_unlock(&list->read_mutex);
1180 return ret; 1155 return ret;
@@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
1185 struct hid_debug_list *list = file->private_data; 1160 struct hid_debug_list *list = file->private_data;
1186 1161
1187 poll_wait(file, &list->hdev->debug_wait, wait); 1162 poll_wait(file, &list->hdev->debug_wait, wait);
1188 if (list->head != list->tail) 1163 if (!kfifo_is_empty(&list->hid_debug_fifo))
1189 return EPOLLIN | EPOLLRDNORM; 1164 return EPOLLIN | EPOLLRDNORM;
1190 if (!list->hdev->debug) 1165 if (!list->hdev->debug)
1191 return EPOLLERR | EPOLLHUP; 1166 return EPOLLERR | EPOLLHUP;
@@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
1200 spin_lock_irqsave(&list->hdev->debug_list_lock, flags); 1175 spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
1201 list_del(&list->node); 1176 list_del(&list->node);
1202 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); 1177 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
1203 kfree(list->hid_debug_buf); 1178 kfifo_free(&list->hid_debug_fifo);
1204 kfree(list); 1179 kfree(list);
1205 1180
1206 return 0; 1181 return 0;
@@ -1246,4 +1221,3 @@ void hid_debug_exit(void)
1246{ 1221{
1247 debugfs_remove_recursive(hid_debug_root); 1222 debugfs_remove_recursive(hid_debug_root);
1248} 1223}
1249
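
The hand-rolled head/tail ring in hid-debug had wrap and truncation bugs that kfifo avoids, and kfifo_to_user() does the copy-out in one call; with a single producer and single consumer, no extra locking is needed around the fifo itself. A userspace sketch of the same power-of-two free-running-counter scheme kfifo uses; the kernel version additionally handles sizing and user copies.

/* Sketch: power-of-two byte FIFO with free-running head/tail counters. */
#include <stdio.h>

#define FIFO_SIZE 16			/* must be a power of two */

static char buf[FIFO_SIZE];
static unsigned int head, tail;		/* free-running; difference = fill */

static unsigned int fifo_in(const char *src, unsigned int len)
{
	unsigned int i, space = FIFO_SIZE - (head - tail);

	if (len > space)
		len = space;		/* drop what does not fit */
	for (i = 0; i < len; i++)
		buf[head++ & (FIFO_SIZE - 1)] = src[i];
	return len;
}

static unsigned int fifo_out(char *dst, unsigned int len)
{
	unsigned int i, avail = head - tail;

	if (len > avail)
		len = avail;
	for (i = 0; i < len; i++)
		dst[i] = buf[tail++ & (FIFO_SIZE - 1)];
	return len;
}

int main(void)
{
	char out[FIFO_SIZE + 1];
	unsigned int n;

	fifo_in("hid event\n", 10);
	n = fifo_out(out, sizeof(out) - 1);
	out[n] = '\0';
	printf("%s", out);
	return 0;
}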
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 518fa76414f5..24f846d67478 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -461,6 +461,9 @@
461#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a 461#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
462#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 462#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
463 463
464#define I2C_VENDOR_ID_GOODIX 0x27c6
465#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
466
464#define USB_VENDOR_ID_GOODTOUCH 0x1aad 467#define USB_VENDOR_ID_GOODTOUCH 0x1aad
465#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f 468#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f
466 469
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 8555ce7e737b..c5edfa966343 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -179,6 +179,8 @@ static const struct i2c_hid_quirks {
179 I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, 179 I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
180 { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001, 180 { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
181 I2C_HID_QUIRK_NO_RUNTIME_PM }, 181 I2C_HID_QUIRK_NO_RUNTIME_PM },
182 { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
183 I2C_HID_QUIRK_NO_RUNTIME_PM },
182 { 0, 0 } 184 { 0, 0 }
183}; 185};
184 186
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index ce0ba2062723..bea4c9850247 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -701,19 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
701int vmbus_disconnect_ring(struct vmbus_channel *channel) 701int vmbus_disconnect_ring(struct vmbus_channel *channel)
702{ 702{
703 struct vmbus_channel *cur_channel, *tmp; 703 struct vmbus_channel *cur_channel, *tmp;
704 unsigned long flags;
705 LIST_HEAD(list);
706 int ret; 704 int ret;
707 705
708 if (channel->primary_channel != NULL) 706 if (channel->primary_channel != NULL)
709 return -EINVAL; 707 return -EINVAL;
710 708
711 /* Snapshot the list of subchannels */ 709 list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
712 spin_lock_irqsave(&channel->lock, flags);
713 list_splice_init(&channel->sc_list, &list);
714 spin_unlock_irqrestore(&channel->lock, flags);
715
716 list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
717 if (cur_channel->rescind) 710 if (cur_channel->rescind)
718 wait_for_completion(&cur_channel->rescind_event); 711 wait_for_completion(&cur_channel->rescind_event);
719 712
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 5301fef16c31..7c6349a50ef1 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
 		pfn_cnt -= pgs_ol;
 		/*
 		 * Check if the corresponding memory block is already
-		 * online by checking its last previously backed page.
-		 * In case it is we need to bring rest (which was not
-		 * backed previously) online too.
+		 * online. It is possible to observe struct pages still
+		 * being uninitialized here so check section instead.
+		 * In case the section is online we need to bring the
+		 * rest of pfns (which were not backed previously)
+		 * online too.
 		 */
 		if (start_pfn > has->start_pfn &&
-		    !PageReserved(pfn_to_page(start_pfn - 1)))
+		    online_section_nr(pfn_to_section_nr(start_pfn)))
 			hv_bring_pgs_online(has, start_pfn, pgs_ol);
 
 	}
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 64d0c85d5161..1f1a55e07733 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
 }
 
 /* Get various debug metrics for the specified ring buffer. */
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
 			    struct hv_ring_buffer_debug_info *debug_info)
 {
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
 
-	if (ring_info->ring_buffer) {
-		hv_get_ringbuffer_availbytes(ring_info,
-					&bytes_avail_toread,
-					&bytes_avail_towrite);
-
-		debug_info->bytes_avail_toread = bytes_avail_toread;
-		debug_info->bytes_avail_towrite = bytes_avail_towrite;
-		debug_info->current_read_index =
-			ring_info->ring_buffer->read_index;
-		debug_info->current_write_index =
-			ring_info->ring_buffer->write_index;
-		debug_info->current_interrupt_mask =
-			ring_info->ring_buffer->interrupt_mask;
-	}
+	if (!ring_info->ring_buffer)
+		return -EINVAL;
+
+	hv_get_ringbuffer_availbytes(ring_info,
+				     &bytes_avail_toread,
+				     &bytes_avail_towrite);
+	debug_info->bytes_avail_toread = bytes_avail_toread;
+	debug_info->bytes_avail_towrite = bytes_avail_towrite;
+	debug_info->current_read_index = ring_info->ring_buffer->read_index;
+	debug_info->current_write_index = ring_info->ring_buffer->write_index;
+	debug_info->current_interrupt_mask
+		= ring_info->ring_buffer->interrupt_mask;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
 
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index d0ff65675292..403fee01572c 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(out_intr_mask);
@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.current_read_index);
 }
 static DEVICE_ATTR_RO(out_read_index);
@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.current_write_index);
 }
 static DEVICE_ATTR_RO(out_write_index);
@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info outbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
+					  &outbound);
+	if (ret < 0)
+		return ret;
 	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(in_intr_mask);
@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.current_read_index);
 }
 static DEVICE_ATTR_RO(in_read_index);
@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.current_write_index);
 }
 static DEVICE_ATTR_RO(in_write_index);
@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
 {
 	struct hv_device *hv_dev = device_to_hv_device(dev);
 	struct hv_ring_buffer_debug_info inbound;
+	int ret;
 
 	if (!hv_dev->channel)
 		return -ENODEV;
-	if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
-		return -EINVAL;
-	hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+
+	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
+	if (ret < 0)
+		return ret;
+
 	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(in_write_bytes_avail);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index c39f89d2deba..2dc628d4f1ae 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1828,7 +1828,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
 
 	ret = i3c_master_retrieve_dev_info(newdev);
 	if (ret)
-		goto err_free_dev;
+		goto err_detach_dev;
 
 	olddev = i3c_master_search_i3c_dev_duplicate(newdev);
 	if (olddev) {
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index f8c00b94817f..bb03079fbade 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -419,12 +419,9 @@ static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
 }
 
-static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
+static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
 				       struct dw_i3c_xfer *xfer)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&master->xferqueue.lock, flags);
 	if (master->xferqueue.cur == xfer) {
 		u32 status;
 
@@ -439,6 +436,15 @@ static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
 	} else {
 		list_del_init(&xfer->node);
 	}
+}
+
+static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
+				       struct dw_i3c_xfer *xfer)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&master->xferqueue.lock, flags);
+	dw_i3c_master_dequeue_xfer_locked(master, xfer);
 	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
 }
 
@@ -494,7 +500,7 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
 	complete(&xfer->comp);
 
 	if (ret < 0) {
-		dw_i3c_master_dequeue_xfer(master, xfer);
+		dw_i3c_master_dequeue_xfer_locked(master, xfer);
 		writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
 		       master->regs + DEVICE_CTRL);
 	}
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index da58020a144e..33a28cde126c 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
 
 int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 {
-	struct request *sense_rq = drive->sense_rq;
+	ide_hwif_t *hwif = drive->hwif;
+	struct request *sense_rq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hwif->lock, flags);
 
 	/* deferred failure from ide_prep_sense() */
 	if (!drive->sense_rq_armed) {
 		printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
 		       drive->name);
+		spin_unlock_irqrestore(&hwif->lock, flags);
 		return -ENOMEM;
 	}
 
+	sense_rq = drive->sense_rq;
 	ide_req(sense_rq)->special = special;
 	drive->sense_rq_armed = false;
 
 	drive->hwif->rq = NULL;
 
 	ide_insert_request_head(drive, sense_rq);
+	spin_unlock_irqrestore(&hwif->lock, flags);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8445b484ae69..b137f27a34d5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
 	}
 
 	if (!blk_update_request(rq, error, nr_bytes)) {
-		if (rq == drive->sense_rq)
+		if (rq == drive->sense_rq) {
 			drive->sense_rq = NULL;
+			drive->sense_rq_active = false;
+		}
 
 		__blk_mq_end_request(rq, error);
 		return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
 }
 
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
-			  const struct blk_mq_queue_data *bd)
+blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
+			  bool local_requeue)
 {
-	ide_drive_t *drive = hctx->queue->queuedata;
-	ide_hwif_t *hwif = drive->hwif;
+	ide_hwif_t *hwif = drive->hwif;
 	struct ide_host *host = hwif->host;
-	struct request *rq = bd->rq;
 	ide_startstop_t startstop;
 
 	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ide_lock_host(host, hwif))
 		return BLK_STS_DEV_RESOURCE;
 
-	blk_mq_start_request(rq);
-
 	spin_lock_irq(&hwif->lock);
 
 	if (!ide_lock_port(hwif)) {
@@ -511,18 +506,6 @@ repeat:
 	drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 
 	/*
-	 * we know that the queue isn't empty, but this can happen
-	 * if ->prep_rq() decides to kill a request
-	 */
-	if (!rq) {
-		rq = bd->rq;
-		if (!rq) {
-			ide_unlock_port(hwif);
-			goto out;
-		}
-	}
-
-	/*
 	 * Sanity: don't accept a request that isn't a PM request
 	 * if we are currently power managed. This is very important as
 	 * blk_stop_queue() doesn't prevent the blk_fetch_request()
@@ -560,9 +543,12 @@ repeat:
 		}
 	} else {
 plug_device:
+		if (local_requeue)
+			list_add(&rq->queuelist, &drive->rq_list);
 		spin_unlock_irq(&hwif->lock);
 		ide_unlock_host(host);
-		ide_requeue_and_plug(drive, rq);
+		if (!local_requeue)
+			ide_requeue_and_plug(drive, rq);
 		return BLK_STS_OK;
 	}
 
@@ -573,6 +559,26 @@ out:
 	return BLK_STS_OK;
 }
 
+/*
+ * Issue a new request to a device.
+ */
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+			  const struct blk_mq_queue_data *bd)
+{
+	ide_drive_t *drive = hctx->queue->queuedata;
+	ide_hwif_t *hwif = drive->hwif;
+
+	spin_lock_irq(&hwif->lock);
+	if (drive->sense_rq_active) {
+		spin_unlock_irq(&hwif->lock);
+		return BLK_STS_DEV_RESOURCE;
+	}
+	spin_unlock_irq(&hwif->lock);
+
+	blk_mq_start_request(bd->rq);
+	return ide_issue_rq(drive, bd->rq, false);
+}
+
 static int drive_is_ready(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
 
 void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	unsigned long flags;
-
-	spin_lock_irqsave(&hwif->lock, flags);
+	drive->sense_rq_active = true;
 	list_add_tail(&rq->queuelist, &drive->rq_list);
-	spin_unlock_irqrestore(&hwif->lock, flags);
-
 	kblockd_schedule_work(&drive->rq_work);
 }
 EXPORT_SYMBOL_GPL(ide_insert_request_head);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 102aa3bc3e7f..8af7af6001eb 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
 	scsi_req(rq)->cmd_len = 1;
 	ide_req(rq)->type = ATA_PRIV_MISC;
+	spin_lock_irq(&hwif->lock);
 	ide_insert_request_head(drive, rq);
+	spin_unlock_irq(&hwif->lock);
 
 out:
 	return;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 63627be0811a..5aeaca24a28f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
 	ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq;
+	blk_status_t ret;
 	LIST_HEAD(list);
 
-	spin_lock_irq(&hwif->lock);
-	if (!list_empty(&drive->rq_list))
-		list_splice_init(&drive->rq_list, &list);
-	spin_unlock_irq(&hwif->lock);
+	blk_mq_quiesce_queue(drive->queue);
 
-	while (!list_empty(&list)) {
-		rq = list_first_entry(&list, struct request, queuelist);
+	ret = BLK_STS_OK;
+	spin_lock_irq(&hwif->lock);
+	while (!list_empty(&drive->rq_list)) {
+		rq = list_first_entry(&drive->rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
+
+		spin_unlock_irq(&hwif->lock);
+		ret = ide_issue_rq(drive, rq, true);
+		spin_lock_irq(&hwif->lock);
 	}
+	spin_unlock_irq(&hwif->lock);
+
+	blk_mq_unquiesce_queue(drive->queue);
+
+	if (ret != BLK_STS_OK)
+		kblockd_schedule_work(&drive->rq_work);
 }
 
 static const u8 ide_hwif_to_major[] =
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 4c8c7a620d08..a5dc13576394 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
 		drive->proc = proc_mkdir(drive->name, parent);
 		if (drive->proc) {
 			ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
-			proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR,
+			proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
 					 drive->proc, &ide_settings_proc_fops,
 					 drive);
 		}
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 3cd830d52967..616734313f0c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -267,7 +267,6 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
 #endif
 
 struct ib_device *ib_device_get_by_index(u32 ifindex);
-void ib_device_put(struct ib_device *device);
 /* RDMA device netlink */
 void nldev_init(void);
 void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 8872453e26c0..238ec42778ef 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -156,19 +156,26 @@ struct ib_device *ib_device_get_by_index(u32 index)
 	down_read(&lists_rwsem);
 	device = __ib_device_get_by_index(index);
 	if (device) {
-		/* Do not return a device if unregistration has started. */
-		if (!refcount_inc_not_zero(&device->refcount))
+		if (!ib_device_try_get(device))
 			device = NULL;
 	}
 	up_read(&lists_rwsem);
 	return device;
 }
 
+/**
+ * ib_device_put - Release IB device reference
+ * @device: device whose reference to be released
+ *
+ * ib_device_put() releases reference to the IB device to allow it to be
+ * unregistered and eventually free.
+ */
 void ib_device_put(struct ib_device *device)
 {
 	if (refcount_dec_and_test(&device->refcount))
 		complete(&device->unreg_completion);
 }
+EXPORT_SYMBOL(ib_device_put);
 
 static struct ib_device *__ib_device_get_by_name(const char *name)
 {
@@ -303,7 +310,6 @@ struct ib_device *ib_alloc_device(size_t size)
 	rwlock_init(&device->client_data_lock);
 	INIT_LIST_HEAD(&device->client_data_list);
 	INIT_LIST_HEAD(&device->port_list);
-	refcount_set(&device->refcount, 1);
 	init_completion(&device->unreg_completion);
 
 	return device;
@@ -620,6 +626,7 @@ int ib_register_device(struct ib_device *device, const char *name,
 		goto cg_cleanup;
 	}
 
+	refcount_set(&device->refcount, 1);
 	device->reg_state = IB_DEV_REGISTERED;
 
 	list_for_each_entry(client, &client_list, list)
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index a4ec43093cb3..acb882f279cb 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -352,6 +352,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
 	umem->writable = 1;
 	umem->is_odp = 1;
 	odp_data->per_mm = per_mm;
+	umem->owning_mm = per_mm->mm;
+	mmgrab(umem->owning_mm);
 
 	mutex_init(&odp_data->umem_mutex);
 	init_completion(&odp_data->notifier_completion);
@@ -384,6 +386,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
 out_page_list:
 	vfree(odp_data->page_list);
 out_odp_data:
+	mmdrop(umem->owning_mm);
 	kfree(odp_data);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 2890a77339e1..5f366838b7ff 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -204,6 +204,9 @@ void ib_uverbs_release_file(struct kref *ref)
 	if (atomic_dec_and_test(&file->device->refcount))
 		ib_uverbs_comp_dev(file->device);
 
+	if (file->async_file)
+		kref_put(&file->async_file->ref,
+			 ib_uverbs_release_async_event_file);
 	put_device(&file->device->dev);
 	kfree(file);
 }
@@ -964,11 +967,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 
 	/* Get an arbitrary mm pointer that hasn't been cleaned yet */
 	mutex_lock(&ufile->umap_lock);
-	if (!list_empty(&ufile->umaps)) {
-		mm = list_first_entry(&ufile->umaps,
-				      struct rdma_umap_priv, list)
-			     ->vma->vm_mm;
-		mmget(mm);
+	while (!list_empty(&ufile->umaps)) {
+		int ret;
+
+		priv = list_first_entry(&ufile->umaps,
+					struct rdma_umap_priv, list);
+		mm = priv->vma->vm_mm;
+		ret = mmget_not_zero(mm);
+		if (!ret) {
+			list_del_init(&priv->list);
+			mm = NULL;
+			continue;
+		}
+		break;
 	}
 	mutex_unlock(&ufile->umap_lock);
 	if (!mm)
@@ -1096,10 +1107,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
 	list_del_init(&file->list);
 	mutex_unlock(&file->device->lists_mutex);
 
-	if (file->async_file)
-		kref_put(&file->async_file->ref,
-			 ib_uverbs_release_async_event_file);
-
 	kref_put(&file->ref, ib_uverbs_release_file);
 
 	return 0;
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 5030ec480370..2a3f2f01028d 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -168,12 +168,18 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr,
 static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
 	struct uverbs_attr_bundle *attrs)
 {
-	struct ib_device *ib_dev = attrs->ufile->device->ib_dev;
+	struct ib_device *ib_dev;
 	struct ib_port_attr attr = {};
 	struct ib_uverbs_query_port_resp_ex resp = {};
+	struct ib_ucontext *ucontext;
 	int ret;
 	u8 port_num;
 
+	ucontext = ib_uverbs_get_ucontext(attrs);
+	if (IS_ERR(ucontext))
+		return PTR_ERR(ucontext);
+	ib_dev = ucontext->device;
+
 	/* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
 	if (!ib_dev->ops.query_port)
 		return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c22ebc774a6a..f9a7e9d29c8b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
 			vmf = 1;
 		break;
 	case STATUS:
-		if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
+		if (flags & VM_WRITE) {
 			ret = -EPERM;
 			goto done;
 		}
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 88242fe95eaa..bf96067876c9 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -987,7 +987,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
 		   opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 960b1946c365..12deacf442cf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -210,6 +210,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
 			       struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+	struct hns_roce_ib_create_srq_resp resp = {};
 	struct hns_roce_srq *srq;
 	int srq_desc_size;
 	int srq_buf_size;
@@ -378,16 +379,21 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
 
 	srq->event = hns_roce_ib_srq_event;
 	srq->ibsrq.ext.xrc.srq_num = srq->srqn;
+	resp.srqn = srq->srqn;
 
 	if (udata) {
-		if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
+		if (ib_copy_to_udata(udata, &resp,
+				     min(udata->outlen, sizeof(resp)))) {
 			ret = -EFAULT;
-			goto err_wrid;
+			goto err_srqc_alloc;
 		}
 	}
 
 	return &srq->ibsrq;
 
+err_srqc_alloc:
+	hns_roce_srq_free(hr_dev, srq);
+
 err_wrid:
 	kvfree(srq->wrid);
 
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 25439da8976c..936ee1314bcd 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
 	if (sqp->tx_ring[wire_tx_ix].ah)
-		rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
+		mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
 	sqp->tx_ring[wire_tx_ix].ah = ah;
 	ib_dma_sync_single_for_cpu(&dev->ib_dev,
 				   sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1902,7 +1902,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
 		if (wc.status == IB_WC_SUCCESS) {
 			switch (wc.opcode) {
 			case IB_WC_SEND:
-				rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
+				mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
 					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
 				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
 					= NULL;
@@ -1931,7 +1931,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1931 " status = %d, wrid = 0x%llx\n", 1931 " status = %d, wrid = 0x%llx\n",
1932 ctx->slave, wc.status, wc.wr_id); 1932 ctx->slave, wc.status, wc.wr_id);
1933 if (!MLX4_TUN_IS_RECV(wc.wr_id)) { 1933 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1934 rdma_destroy_ah(sqp->tx_ring[wc.wr_id & 1934 mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1935 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); 1935 (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
1936 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah 1936 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1937 = NULL; 1937 = NULL;
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index e8a1e4498e3f..798591a18484 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -630,8 +630,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
 		UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
 	UAPI_DEF_CHAIN_OBJ_TREE(
 		UVERBS_OBJECT_FLOW,
-		&mlx5_ib_fs,
-		UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
+		&mlx5_ib_fs),
 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
 				&mlx5_ib_flow_actions),
 	{},
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 01e0f6200631..4ee32964e1dd 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1595,10 +1595,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
 	struct prefetch_mr_work *w =
 		container_of(work, struct prefetch_mr_work, work);
 
-	if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED)
+	if (ib_device_try_get(&w->dev->ib_dev)) {
 		mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list,
 					 w->num_sge);
-
+		ib_device_put(&w->dev->ib_dev);
+	}
+	put_device(&w->dev->ib_dev.dev);
 	kfree(w);
 }
 
@@ -1617,15 +1619,13 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 		return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list,
 						num_sge);
 
-	if (dev->ib_dev.reg_state != IB_DEV_REGISTERED)
-		return -ENODEV;
-
 	work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
 	if (!work)
 		return -ENOMEM;
 
 	memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
 
+	get_device(&dev->ib_dev.dev);
 	work->dev = dev;
 	work->pf_flags = pf_flags;
 	work->num_sge = num_sge;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index dd2ae640bc84..7db778d96ef5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1912,14 +1912,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	}
 
 	if (!check_flags_mask(ucmd.flags,
+			      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
+			      MLX5_QP_FLAG_BFREG_INDEX |
+			      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
+			      MLX5_QP_FLAG_SCATTER_CQE |
 			      MLX5_QP_FLAG_SIGNATURE |
-			      MLX5_QP_FLAG_SCATTER_CQE |
-			      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
-			      MLX5_QP_FLAG_BFREG_INDEX |
-			      MLX5_QP_FLAG_TYPE_DCT |
-			      MLX5_QP_FLAG_TYPE_DCI |
-			      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
-			      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE))
+			      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
+			      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+			      MLX5_QP_FLAG_TYPE_DCI |
+			      MLX5_QP_FLAG_TYPE_DCT))
 			return -EINVAL;
 
 	err = get_qp_user_index(to_mucontext(pd->uobject->context),
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 868da0ece7ba..445ea19a2ec8 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -512,7 +512,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
 		   opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index a1bd8cfc2c25..c6cc3e4ab71d 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2910,6 +2910,8 @@ send:
 			goto op_err;
 		if (!ret)
 			goto rnr_nak;
+		if (wqe->length > qp->r_len)
+			goto inv_err;
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -3078,7 +3080,10 @@ op_err:
 	goto err;
 
 inv_err:
-	send_status = IB_WC_REM_INV_REQ_ERR;
+	send_status =
+		sqp->ibqp.qp_type == IB_QPT_RC ?
+			IB_WC_REM_INV_REQ_ERR :
+			IB_WC_SUCCESS;
 	wc.status = IB_WC_LOC_QP_OP_ERR;
 	goto err;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1da119d901a9..73e808c1e6ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
 	struct list_head     list;
 	struct net_device   *dev;
 	struct ipoib_neigh  *neigh;
-	struct ipoib_path   *path;
 	struct ipoib_tx_buf *tx_ring;
 	unsigned int	     tx_head;
 	unsigned int	     tx_tail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0428e01e8f69..aa9dcfc36cd3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
 
 	neigh->cm = tx;
 	tx->neigh = neigh;
-	tx->path = path;
 	tx->dev = dev;
 	list_add(&tx->list, &priv->cm.start_list);
 	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 				   neigh->daddr + QPN_AND_OPTIONS_OFFSET);
 			goto free_neigh;
 		}
-		memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
+		memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
 
 		spin_unlock_irqrestore(&priv->lock, flags);
 		netif_tx_unlock_bh(dev);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cfc8b94527b9..aa4e431cbcd3 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -252,6 +252,8 @@ static const struct xpad_device {
 	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOXONE_VENDOR(0x0e6f),	/* 0x0e6f X-Box One controllers */
 	XPAD_XBOX360_VENDOR(0x0f0d),	/* Hori Controllers */
 	XPAD_XBOXONE_VENDOR(0x0f0d),	/* Hori Controllers */
+	XPAD_XBOX360_VENDOR(0x1038),	/* SteelSeries Controllers */
 	XPAD_XBOX360_VENDOR(0x11c9),	/* Nacon GC100XF */
 	XPAD_XBOX360_VENDOR(0x12ab),	/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),	/* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 8ec483e8688b..26ec603fe220 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -39,6 +39,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
+#include <linux/overflow.h>
 #include <linux/input/mt.h>
 #include "../input-compat.h"
 
@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
 static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 				   const struct input_absinfo *abs)
 {
-	int min, max;
+	int min, max, range;
 
 	min = abs->minimum;
 	max = abs->maximum;
@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 		return -EINVAL;
 	}
 
-	if (abs->flat > max - min) {
+	if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
 		printk(KERN_DEBUG
 		       "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
 		       UINPUT_NAME, code, abs->flat, min, max);
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index b36084710f69..a7cfab3db9ee 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -23,7 +23,6 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/clk.h>
 
 /*
  * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller.
@@ -75,7 +74,6 @@ struct olpc_apsp {
 	struct serio *kbio;
 	struct serio *padio;
 	void __iomem *base;
-	struct clk *clk;
 	int open_count;
 	int irq;
 };
@@ -148,17 +146,11 @@ static int olpc_apsp_open(struct serio *port)
 	struct olpc_apsp *priv = port->port_data;
 	unsigned int tmp;
 	unsigned long l;
-	int error;
 
 	if (priv->open_count++ == 0) {
-		error = clk_prepare_enable(priv->clk);
-		if (error)
-			return error;
-
 		l = readl(priv->base + COMMAND_FIFO_STATUS);
 		if (!(l & CMD_STS_MASK)) {
 			dev_err(priv->dev, "SP cannot accept commands.\n");
-			clk_disable_unprepare(priv->clk);
 			return -EIO;
 		}
 
@@ -179,8 +171,6 @@ static void olpc_apsp_close(struct serio *port)
 		/* Disable interrupt 0 */
 		tmp = readl(priv->base + PJ_INTERRUPT_MASK);
 		writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK);
-
-		clk_disable_unprepare(priv->clk);
 	}
 }
 
@@ -195,6 +185,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
+	priv->dev = &pdev->dev;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(priv->base)) {
@@ -206,10 +198,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
 	if (priv->irq < 0)
 		return priv->irq;
 
-	priv->clk = devm_clk_get(&pdev->dev, "sp");
-	if (IS_ERR(priv->clk))
-		return PTR_ERR(priv->clk);
-
 	/* KEYBOARD */
 	kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
 	if (!kb_serio)
@@ -248,7 +236,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
 		goto err_irq;
 	}
 
-	priv->dev = &pdev->dev;
 	device_init_wakeup(priv->dev, 1);
 	platform_set_drvdata(pdev, priv);
 
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index af6027cc7bbf..068dbbc610fc 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -698,7 +698,7 @@ config TOUCHSCREEN_EDT_FT5X06
 
 config TOUCHSCREEN_RASPBERRYPI_FW
 	tristate "Raspberry Pi's firmware base touch screen support"
-	depends on RASPBERRYPI_FIRMWARE || COMPILE_TEST
+	depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
 	help
 	  Say Y here if you have the official Raspberry Pi 7 inch screen on
 	  your system.
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 87ba23a75b38..2a7b78bb98b4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
 
 static void do_detach(struct iommu_dev_data *dev_data)
 {
+	struct protection_domain *domain = dev_data->domain;
 	struct amd_iommu *iommu;
 	u16 alias;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 	alias = dev_data->alias;
 
-	/* decrease reference counters */
-	dev_data->domain->dev_iommu[iommu->index] -= 1;
-	dev_data->domain->dev_cnt -= 1;
-
 	/* Update data structures */
 	dev_data->domain = NULL;
 	list_del(&dev_data->list);
@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
 
 	/* Flush the DTE entry */
 	device_flush_dte(dev_data);
+
+	/* Flush IOTLB */
+	domain_flush_tlb_pde(domain);
+
+	/* Wait for the flushes to finish */
+	domain_flush_complete(domain);
+
+	/* decrease reference counters - needs to happen after the flushes */
+	domain->dev_iommu[iommu->index] -= 1;
+	domain->dev_cnt -= 1;
 }
 
 /*
@@ -2617,13 +2624,13 @@ out_unmap:
 			bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
 			iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
 
-			if (--mapped_pages)
+			if (--mapped_pages == 0)
 				goto out_free_iova;
 		}
 	}
 
 out_free_iova:
-	free_iova_fast(&dma_dom->iovad, address, npages);
+	free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
 
 out_err:
 	return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2bd9ac285c0d..1457f931218e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5294,7 +5294,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
 	struct iommu_resv_region *entry, *next;
 
 	list_for_each_entry_safe(entry, next, head, list) {
-		if (entry->type == IOMMU_RESV_RESERVED)
+		if (entry->type == IOMMU_RESV_MSI)
 			kfree(entry);
 	}
 }
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 730f7dabcf37..7e0df67bd3e9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -441,6 +441,10 @@ static int mtk_iommu_add_device(struct device *dev)
 		iommu_spec.args_count = count;
 
 		mtk_iommu_create_mapping(dev, &iommu_spec);
+
+		/* dev->iommu_fwspec might have changed */
+		fwspec = dev_iommu_fwspec_get(dev);
+
 		of_node_put(iommu_spec.np);
 	}
 
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index d8947b28db2d..f04a6df65eb8 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -224,7 +224,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 	 * If we have reason to believe the IOMMU driver missed the initial
 	 * probe for dev, replay it to get things in order.
 	 */
-	if (dev->bus && !device_iommu_mapped(dev))
+	if (!err && dev->bus && !device_iommu_mapped(dev))
 		err = iommu_probe_device(dev);
 
 	/* Ignore all other errors apart from EPROBE_DEFER */
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index fb7157188294..666f5986b0ce 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2405,13 +2405,14 @@ static void its_free_device(struct its_device *its_dev)
 	kfree(its_dev);
 }
 
-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
 {
 	int idx;
 
-	idx = find_first_zero_bit(dev->event_map.lpi_map,
-				  dev->event_map.nr_lpis);
-	if (idx == dev->event_map.nr_lpis)
+	idx = bitmap_find_free_region(dev->event_map.lpi_map,
+				      dev->event_map.nr_lpis,
+				      get_count_order(nvecs));
+	if (idx < 0)
 		return -ENOSPC;
 
 	*hwirq = dev->event_map.lpi_base + idx;
@@ -2507,21 +2508,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	int err;
 	int i;
 
-	for (i = 0; i < nr_irqs; i++) {
-		err = its_alloc_device_irq(its_dev, &hwirq);
-		if (err)
-			return err;
+	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
+	if (err)
+		return err;
 
-		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+	for (i = 0; i < nr_irqs; i++) {
+		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
 		if (err)
 			return err;
 
 		irq_domain_set_hwirq_and_chip(domain, virq + i,
-					      hwirq, &its_irq_chip, its_dev);
+					      hwirq + i, &its_irq_chip, its_dev);
 		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
 		pr_debug("ID:%d pID:%d vID:%d\n",
-			 (int)(hwirq - its_dev->event_map.lpi_base),
-			 (int) hwirq, virq + i);
+			 (int)(hwirq + i - its_dev->event_map.lpi_base),
+			 (int)(hwirq + i), virq + i);
 	}
 
 	return 0;
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index ad70e7c416e3..fbfa7ff6deb1 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -24,7 +24,7 @@ struct mbi_range {
 	unsigned long		*bm;
 };
 
-static struct mutex		mbi_lock;
+static DEFINE_MUTEX(mbi_lock);
 static phys_addr_t		mbi_phys_base;
 static struct mbi_range		*mbi_ranges;
 static unsigned int		mbi_range_nr;
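
The one-line fix above matters because a file-scope "struct mutex" is only zero-initialized, which is not a valid lock state; DEFINE_MUTEX() emits a properly initialized one. A userspace analogue (sketch only), where PTHREAD_MUTEX_INITIALIZER plays the role of DEFINE_MUTEX():

#include <pthread.h>
#include <stdio.h>

/* statically initialized, so it is usable before any setup code runs */
static pthread_mutex_t mbi_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&mbi_lock);
	puts("lock taken and released safely");
	pthread_mutex_unlock(&mbi_lock);
	return 0;
}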
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
index e9256dee1a45..8b81271c823c 100644
--- a/drivers/irqchip/irq-madera.c
+++ b/drivers/irqchip/irq-madera.c
@@ -7,7 +7,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
@@ -16,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/of_irq.h>
 #include <linux/irqchip/irq-madera.h>
 #include <linux/mfd/madera/core.h>
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 6edfd4bfa169..a93296b9b45d 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -822,6 +822,7 @@ out_unmap:
 static const struct irq_domain_ops stm32_exti_h_domain_ops = {
 	.alloc = stm32_exti_h_domain_alloc,
 	.free = irq_domain_free_irqs_common,
+	.xlate = irq_domain_xlate_twocell,
 };
 
 static int
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 5385f5768345..27933338f7b3 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -71,14 +71,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d)
 	unsigned int mask = 1u << d->hwirq;
 
 	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
-		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
-			      HW_IRQ_MX_BASE), MIENG);
-	} else {
-		mask = __this_cpu_read(cached_irq_mask) & ~mask;
-		__this_cpu_write(cached_irq_mask, mask);
-		xtensa_set_sr(mask, intenable);
+		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+		if (ext_irq >= HW_IRQ_MX_BASE) {
+			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
+			return;
+		}
 	}
+	mask = __this_cpu_read(cached_irq_mask) & ~mask;
+	__this_cpu_write(cached_irq_mask, mask);
+	xtensa_set_sr(mask, intenable);
 }
 
 static void xtensa_mx_irq_unmask(struct irq_data *d)
@@ -86,14 +89,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
 	unsigned int mask = 1u << d->hwirq;
 
 	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
-		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
-			      HW_IRQ_MX_BASE), MIENGSET);
-	} else {
-		mask |= __this_cpu_read(cached_irq_mask);
-		__this_cpu_write(cached_irq_mask, mask);
-		xtensa_set_sr(mask, intenable);
+		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+		if (ext_irq >= HW_IRQ_MX_BASE) {
+			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
+			return;
+		}
 	}
+	mask |= __this_cpu_read(cached_irq_mask);
+	__this_cpu_write(cached_irq_mask, mask);
+	xtensa_set_sr(mask, intenable);
 }
 
 static void xtensa_mx_irq_enable(struct irq_data *d)
@@ -113,7 +119,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d)
 
 static int xtensa_mx_irq_retrigger(struct irq_data *d)
 {
-	xtensa_set_sr(1 << d->hwirq, intset);
+	unsigned int mask = 1u << d->hwirq;
+
+	if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+		return 0;
+	xtensa_set_sr(mask, intset);
 	return 1;
 }
 
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index c200234dd2c9..ab12328be5ee 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -70,7 +70,11 @@ static void xtensa_irq_ack(struct irq_data *d)
 
 static int xtensa_irq_retrigger(struct irq_data *d)
 {
-	xtensa_set_sr(1 << d->hwirq, intset);
+	unsigned int mask = 1u << d->hwirq;
+
+	if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+		return 0;
+	xtensa_set_sr(mask, intset);
 	return 1;
 }
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0ff22159a0ca..47d4e0d30bf0 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2414,9 +2414,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
 	 * capi:cipher_api_spec-iv:ivopts
 	 */
 	tmp = &cipher_in[strlen("capi:")];
-	cipher_api = strsep(&tmp, "-");
-	*ivmode = strsep(&tmp, ":");
-	*ivopts = tmp;
+
+	/* Separate IV options if present, it can contain another '-' in hash name */
+	*ivopts = strrchr(tmp, ':');
+	if (*ivopts) {
+		**ivopts = '\0';
+		(*ivopts)++;
+	}
+	/* Parse IV mode */
+	*ivmode = strrchr(tmp, '-');
+	if (*ivmode) {
+		**ivmode = '\0';
+		(*ivmode)++;
+	}
+	/* The rest is crypto API spec */
+	cipher_api = tmp;
 
 	if (*ivmode && !strcmp(*ivmode, "lmk"))
 		cc->tfms_count = 64;
@@ -2486,11 +2498,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
 		goto bad_mem;
 
 	chainmode = strsep(&tmp, "-");
-	*ivopts = strsep(&tmp, "-");
-	*ivmode = strsep(&*ivopts, ":");
-
-	if (tmp)
-		DMWARN("Ignoring unexpected additional cipher options");
+	*ivmode = strsep(&tmp, ":");
+	*ivopts = tmp;
 
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
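
The reworked parser above scans right to left so a '-' inside the IV options (e.g. the sha3-256 hash name) no longer breaks the split. A standalone sketch of that logic (hypothetical helper, not the dm-crypt code itself):

#include <stdio.h>
#include <string.h>

/* split "cipher_api[-ivmode[:ivopts]]" in place, rightmost separators first */
static void parse_capi(char *tmp, char **api, char **ivmode, char **ivopts)
{
	*ivopts = strrchr(tmp, ':');	/* IV options follow the last ':' */
	if (*ivopts) {
		**ivopts = '\0';
		(*ivopts)++;
	}
	*ivmode = strrchr(tmp, '-');	/* IV mode follows the last '-' */
	if (*ivmode) {
		**ivmode = '\0';
		(*ivmode)++;
	}
	*api = tmp;			/* the rest is the crypto API spec */
}

int main(void)
{
	char s[] = "cbc(aes)-essiv:sha3-256";
	char *api, *ivmode, *ivopts;

	parse_capi(s, &api, &ivmode, &ivopts);
	/* prints: api=cbc(aes) ivmode=essiv ivopts=sha3-256 */
	printf("api=%s ivmode=%s ivopts=%s\n", api, ivmode, ivopts);
	return 0;
}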
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4eb5f8c56535..a20531e5f3b4 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
 static void rq_completed(struct mapped_device *md)
 {
 	/* nudge anyone waiting on suspend queue */
-	if (unlikely(waitqueue_active(&md->wait)))
+	if (unlikely(wq_has_sleeper(&md->wait)))
 		wake_up(&md->wait);
 
 	/*
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 20b0776e39ef..ed3caceaed07 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
 	return r;
 }
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 {
 	int r;
 	uint32_t ref_count;
@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
 	down_read(&pmd->root_lock);
 	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
 	if (!r)
-		*result = (ref_count != 0);
+		*result = (ref_count > 1);
 	up_read(&pmd->root_lock);
 
 	return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 35e954ea20a9..f6be0d733c20 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
 
 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
 
 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index dadd9696340c..ca8af21bf644 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	 * passdown we have to check that these blocks are now unused.
 	 */
 	int r = 0;
-	bool used = true;
+	bool shared = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
@@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
-			r = dm_pool_block_is_used(pool->pmd, b, &used);
+			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
 			if (r)
 				goto out;
 
-			if (!used)
+			if (!shared)
 				break;
 		}
 
@@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 
 		/* find end of run */
 		for (e = b + 1; e != end; e++) {
-			r = dm_pool_block_is_used(pool->pmd, e, &used);
+			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
 			if (r)
 				goto out;
 
-			if (used)
+			if (shared)
 				break;
 		}
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d67c95ef8d7e..515e6af9bed2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io)
 			    true, duration, &io->stats_aux);
 
 	/* nudge anyone waiting on suspend queue */
-	if (unlikely(waitqueue_active(&md->wait)))
+	if (unlikely(wq_has_sleeper(&md->wait)))
 		wake_up(&md->wait);
 }
 
@@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 
 	__bio_clone_fast(clone, bio);
 
-	if (unlikely(bio_integrity(bio) != NULL)) {
+	if (bio_integrity(bio)) {
 		int r;
 
 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
@@ -1339,7 +1339,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
 	clone->bi_iter.bi_size = to_bytes(len);
 
-	if (unlikely(bio_integrity(bio) != NULL))
+	if (bio_integrity(bio))
 		bio_integrity_trim(clone);
 
 	return 0;
@@ -1588,6 +1588,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
 	ci->sector = bio->bi_iter.bi_sector;
 }
 
+#define __dm_part_stat_sub(part, field, subnd) \
+	(part_stat_get(part, field) -= (subnd))
+
 /*
  * Entry point to split a bio into clones and submit them to the targets.
  */
@@ -1642,7 +1645,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 			struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
 						  GFP_NOIO, &md->queue->bio_split);
 			ci.io->orig_bio = b;
+
+			/*
+			 * Adjust IO stats for each split, otherwise upon queue
+			 * reentry there will be redundant IO accounting.
+			 * NOTE: this is a stop-gap fix, a proper fix involves
+			 * significant refactoring of DM core's bio splitting
+			 * (by eliminating DM's splitting and just using bio_split)
+			 */
+			part_stat_lock();
+			__dm_part_stat_sub(&dm_disk(md)->part0,
+					   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+			part_stat_unlock();
+
 			bio_chain(b, bio);
+			trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
 			ret = generic_make_request(bio);
 			break;
 		}
@@ -1713,6 +1730,15 @@ out:
 	return ret;
 }
 
+static blk_qc_t dm_process_bio(struct mapped_device *md,
+			       struct dm_table *map, struct bio *bio)
+{
+	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+		return __process_bio(md, map, bio);
+	else
+		return __split_and_process_bio(md, map, bio);
+}
+
 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
@@ -1733,10 +1759,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 		return ret;
 	}
 
-	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-		ret = __process_bio(md, map, bio);
-	else
-		ret = __split_and_process_bio(md, map, bio);
+	ret = dm_process_bio(md, map, bio);
 
 	dm_put_live_table(md, srcu_idx);
 	return ret;
@@ -2415,9 +2438,9 @@ static void dm_wq_work(struct work_struct *work)
 			break;
 
 		if (dm_request_based(md))
-			generic_make_request(c);
+			(void) generic_make_request(c);
 		else
-			__split_and_process_bio(md, map, c);
+			(void) dm_process_bio(md, map, c);
 	}
 
 	dm_put_live_table(md, srcu_idx);
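
The accounting change in __split_and_process_bio() can be checked with simple arithmetic: the whole bio is accounted when it first enters the queue, and its unprocessed tail is accounted again when the split resubmits it, so the tail must be subtracted once at split time. A toy model of that bookkeeping (sector counts invented for illustration):

#include <stdio.h>

int main(void)
{
	long sectors_accounted = 0;
	long bio = 96, mapped = 64, tail = bio - mapped;

	sectors_accounted += bio;	/* whole bio counted on first entry */
	sectors_accounted -= tail;	/* __dm_part_stat_sub() at the split */
	sectors_accounted += tail;	/* tail re-enters via generic_make_request() */

	printf("accounted %ld of %ld submitted sectors\n", sectors_accounted, bio);
	return 0;
}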
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ec3a5ef7fee0..cbbe6b6535be 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-			  sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+			struct r5conf *conf,
+			sector_t stripe_sect,
+			int noblock)
 {
 	struct stripe_head *sh;
 
-	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+	sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
 	if (!sh)
 		return NULL;  /* no more stripe available */
 
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 				  stripe_sect);
 
 		if (!sh) {
-			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
 			/*
 			 * cannot get stripe from raid5_get_active_stripe
 			 * try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 				r5c_recovery_replay_stripes(
 					cached_stripe_list, ctx);
 				sh = r5c_recovery_alloc_stripe(
-					conf, stripe_sect);
+					conf, stripe_sect, 1);
 			}
 			if (!sh) {
+				int new_size = conf->min_nr_stripes * 2;
 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
 					 mdname(mddev),
-					 conf->min_nr_stripes * 2);
-				raid5_set_cache_size(mddev,
-						     conf->min_nr_stripes * 2);
-				sh = r5c_recovery_alloc_stripe(conf,
-							       stripe_sect);
+					 new_size);
+				ret = raid5_set_cache_size(mddev, new_size);
+				if (conf->min_nr_stripes <= new_size / 2) {
+					pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+					       mdname(mddev),
+					       ret,
+					       new_size,
+					       conf->min_nr_stripes,
+					       conf->max_nr_stripes);
+					return -ENOMEM;
+				}
+				sh = r5c_recovery_alloc_stripe(
+					conf, stripe_sect, 0);
 			}
 			if (!sh) {
 				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
 				       mdname(mddev));
 				return -ENOMEM;
 			}
 			list_add_tail(&sh->lru, cached_stripe_list);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4990f0319f6c..cecea901ab8c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+	int result = 0;
 	struct r5conf *conf = mddev->private;
 
 	if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 
 	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
-		if (!grow_one_stripe(conf, GFP_KERNEL))
+		if (!grow_one_stripe(conf, GFP_KERNEL)) {
+			conf->min_nr_stripes = conf->max_nr_stripes;
+			result = -ENOMEM;
 			break;
+		}
 	mutex_unlock(&conf->cache_size_mutex);
 
-	return 0;
+	return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
 
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f461460a2aeb..76f9909cf396 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1419,7 +1419,7 @@ config MFD_TPS65217
 
 config MFD_TPS68470
 	bool "TI TPS68470 Power Management / LED chips"
-	depends on ACPI && I2C=y
+	depends on ACPI && PCI && I2C=y
 	select MFD_CORE
 	select REGMAP_I2C
 	select I2C_DESIGNWARE_PLATFORM
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index b8aaa684c397..2ed23c99f59f 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
  *
  * Return:
  *	0 - Success
+ *	Non-zero - Failure
  */
 static int ibmvmc_open(struct inode *inode, struct file *file)
 {
 	struct ibmvmc_file_session *session;
-	int rc = 0;
 
 	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
 		 (unsigned long)inode, (unsigned long)file,
 		 ibmvmc.state);
 
 	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (!session)
+		return -ENOMEM;
+
 	session->file = file;
 	file->private_data = session;
 
-	return rc;
+	return 0;
 }
 
 /**
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 78c26cebf5d4..8f7616557c97 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1187,9 +1187,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 		dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;
 
 		if (dma_setup_res->status) {
-			dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
-				 dma_setup_res->status,
-				 mei_hbm_status_str(dma_setup_res->status));
+			u8 status = dma_setup_res->status;
+
+			if (status == MEI_HBMS_NOT_ALLOWED) {
+				dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
+			} else {
+				dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
+					 status,
+					 mei_hbm_status_str(status));
+			}
 			dev->hbm_f_dr_supported = 0;
 			mei_dmam_ring_free(dev);
 		}
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index e4b10b2d1a08..23739a60517f 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
 #define MEI_DEV_ID_BXT_M      0x1A9A  /* Broxton M */
 #define MEI_DEV_ID_APL_I      0x5A9A  /* Apollo Lake I */
 
+#define MEI_DEV_ID_DNV_IE     0x19E5  /* Denverton IE */
+
 #define MEI_DEV_ID_GLK        0x319A  /* Gemini Lake */
 
 #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 73ace2d59dea..e89497f858ae 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
+
 	{MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 595ac065b401..95ff7c5a1dfb 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -70,8 +70,12 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context)
 	struct resource r;
 
 	if (acpi_dev_resource_io(res, &r)) {
+#ifdef CONFIG_HAS_IOPORT_MAP
 		base = ioport_map(r.start, resource_size(&r));
 		return AE_OK;
+#else
+		return AE_ERROR;
+#endif
 	} else if (acpi_dev_resource_memory(res, &r)) {
 		base = ioremap(r.start, resource_size(&r));
 		return AE_OK;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e26b8145efb3..a44ec8bb5418 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -116,7 +116,7 @@ config MMC_RICOH_MMC
 
 config MMC_SDHCI_ACPI
 	tristate "SDHCI support for ACPI enumerated SDHCI controllers"
-	depends on MMC_SDHCI && ACPI
+	depends on MMC_SDHCI && ACPI && PCI
 	select IOSF_MBI if X86
 	help
 	  This selects support for ACPI enumerated SDHCI controllers,
@@ -978,7 +978,7 @@ config MMC_SDHCI_OMAP
 	tristate "TI SDHCI Controller Support"
 	depends on MMC_SDHCI_PLTFM && OF
 	select THERMAL
-	select TI_SOC_THERMAL
+	imply TI_SOC_THERMAL
 	help
 	  This selects the Secure Digital Host Controller Interface (SDHCI)
 	  support present in TI's DRA7 SOCs. The controller supports
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 50293529d6de..c9e7aa50bb0a 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1431,6 +1431,8 @@ static int bcm2835_probe(struct platform_device *pdev)
 
 err:
 	dev_dbg(dev, "%s -> err %d\n", __func__, ret);
+	if (host->dma_chan_rxtx)
+		dma_release_channel(host->dma_chan_rxtx);
 	mmc_free_host(mmc);
 
 	return ret;
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index ed8f2254b66a..aa38b1a8017e 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -1,11 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2018 Mellanox Technologies.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
  */
 
 #include <linux/bitfield.h>
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c2690c1a50ff..f19ec60bcbdc 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -179,6 +179,8 @@ struct meson_host {
 	struct sd_emmc_desc *descs;
 	dma_addr_t descs_dma_addr;
 
+	int irq;
+
 	bool vqmmc_enabled;
 };
 
@@ -738,6 +740,11 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct meson_host *host = mmc_priv(mmc);
+	int adj = 0;
+
+	/* enable signal resampling w/o delay */
+	adj = ADJUST_ADJ_EN;
+	writel(adj, host->regs + host->data->adjust);
 
 	return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
 }
@@ -768,6 +775,9 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	if (!IS_ERR(mmc->supply.vmmc))
 		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
 
+	/* disable signal resampling */
+	writel(0, host->regs + host->data->adjust);
+
 	/* Reset rx phase */
 	clk_set_phase(host->rx_clk, 0);
 
@@ -1166,7 +1176,7 @@ static int meson_mmc_get_cd(struct mmc_host *mmc)
 
 static void meson_mmc_cfg_init(struct meson_host *host)
 {
-	u32 cfg = 0, adj = 0;
+	u32 cfg = 0;
 
 	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
 			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
@@ -1177,10 +1187,6 @@ static void meson_mmc_cfg_init(struct meson_host *host)
 	cfg |= CFG_ERR_ABORT;
 
 	writel(cfg, host->regs + SD_EMMC_CFG);
-
-	/* enable signal resampling w/o delay */
-	adj = ADJUST_ADJ_EN;
-	writel(adj, host->regs + host->data->adjust);
 }
 
 static int meson_mmc_card_busy(struct mmc_host *mmc)
@@ -1231,7 +1237,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct meson_host *host;
 	struct mmc_host *mmc;
-	int ret, irq;
+	int ret;
 
 	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
 	if (!mmc)
@@ -1276,8 +1282,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
 		goto free_host;
 	}
 
-	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
+	host->irq = platform_get_irq(pdev, 0);
+	if (host->irq <= 0) {
 		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
 		ret = -EINVAL;
 		goto free_host;
@@ -1331,9 +1337,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
 	writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
 	       host->regs + SD_EMMC_IRQ_EN);
 
-	ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
-					meson_mmc_irq_thread, IRQF_SHARED,
-					NULL, host);
+	ret = request_threaded_irq(host->irq, meson_mmc_irq,
+			meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
 	if (ret)
 		goto err_init_clk;
 
@@ -1351,7 +1356,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
 	if (host->bounce_buf == NULL) {
 		dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
 		ret = -ENOMEM;
-		goto err_init_clk;
+		goto err_free_irq;
 	}
 
 	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
@@ -1370,6 +1375,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
 err_bounce_buf:
 	dma_free_coherent(host->dev, host->bounce_buf_size,
 			  host->bounce_buf, host->bounce_dma_addr);
+err_free_irq:
+	free_irq(host->irq, host);
 err_init_clk:
 	clk_disable_unprepare(host->mmc_clk);
 err_core_clk:
@@ -1387,6 +1394,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
 
 	/* disable interrupts */
 	writel(0, host->regs + SD_EMMC_IRQ_EN);
+	free_irq(host->irq, host);
 
 	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
 			  host->descs, host->descs_dma_addr);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 8afeaf81ae66..833ef0590af8 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
 
 	if (timing == MMC_TIMING_MMC_HS400 &&
 	    host->dev_comp->hs400_tune)
-		sdr_set_field(host->base + PAD_CMD_TUNE,
+		sdr_set_field(host->base + tune_reg,
 			      MSDC_PAD_TUNE_CMDRRDLY,
 			      host->hs400_cmd_int_delay);
 	dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 0db99057c44f..9d12c06c7fd6 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
 
 	iproc_host->data = iproc_data;
 
-	mmc_of_parse(host->mmc);
+	ret = mmc_of_parse(host->mmc);
+	if (ret)
+		goto err;
+
 	sdhci_get_property(pdev);
 
 	host->mmc->caps |= iproc_host->data->mmc_caps;
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index a0f954f36c09..44e6c7b1b222 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -257,10 +257,7 @@ static int handle_tx(struct ser_device *ser)
 		if (skb->len == 0) {
 			struct sk_buff *tmp = skb_dequeue(&ser->head);
 			WARN_ON(tmp != skb);
-			if (in_interrupt())
-				dev_kfree_skb_irq(skb);
-			else
-				kfree_skb(skb);
+			dev_consume_skb_any(skb);
 		}
 	}
 	/* Send flow off if queue is empty */
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3b3f88ffab53..c05e4d50d43d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
 struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
 {
 	struct can_priv *priv = netdev_priv(dev);
-	struct sk_buff *skb = priv->echo_skb[idx];
-	struct canfd_frame *cf;
 
 	if (idx >= priv->echo_skb_max) {
 		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
 		return NULL;
 	}
 
-	if (!skb) {
-		netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
-			   __func__, idx);
-		return NULL;
-	}
+	if (priv->echo_skb[idx]) {
+		/* Using "struct canfd_frame::len" for the frame
+		 * length is supported on both CAN and CANFD frames.
+		 */
+		struct sk_buff *skb = priv->echo_skb[idx];
+		struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+		u8 len = cf->len;
 
-	/* Using "struct canfd_frame::len" for the frame
-	 * length is supported on both CAN and CANFD frames.
-	 */
-	cf = (struct canfd_frame *)skb->data;
-	*len_ptr = cf->len;
-	priv->echo_skb[idx] = NULL;
+		*len_ptr = len;
+		priv->echo_skb[idx] = NULL;
 
-	return skb;
+		return skb;
+	}
+
+	return NULL;
 }
 
 /*
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0f36eafe3ac1..1c66fb2ad76b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1106,7 +1106,7 @@ static int flexcan_chip_start(struct net_device *dev)
 			}
 		} else {
 			/* clear and invalidate unused mailboxes first */
-			for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) {
+			for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
 				mb = flexcan_get_mb(priv, i);
 				priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
 					    &mb->can_ctrl);
@@ -1432,7 +1432,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
 	gpr_np = of_find_node_by_phandle(phandle);
 	if (!gpr_np) {
 		dev_dbg(&pdev->dev, "could not find gpr node by phandle\n");
-		return PTR_ERR(gpr_np);
+		return -ENODEV;
 	}
 
 	priv = netdev_priv(dev);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2caa8c8b4b55..1bfc5ff8d81d 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -664,7 +664,7 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
 	if (port < 9)
 		return 0;
 
-	return mv88e6390_serdes_irq_setup(chip, port);
+	return mv88e6390x_serdes_irq_setup(chip, port);
 }
 
 void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 4f11f98347ed..1827ef1f6d55 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2059,7 +2059,7 @@ static inline void ace_tx_int(struct net_device *dev,
 		if (skb) {
 			dev->stats.tx_packets++;
 			dev->stats.tx_bytes += skb->len;
-			dev_kfree_skb_irq(skb);
+			dev_consume_skb_irq(skb);
 			info->skb = NULL;
 		}
 
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 0fb986ba3290..0ae723f75341 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
 		& 0xffff;
 
 	if (inuse) { /* Tx FIFO is not empty */
-		ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+		ready = max_t(int,
+			      priv->tx_prod - priv->tx_cons - inuse - 1, 0);
 	} else {
 		/* Check for buffered last packet */
 		status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 02921d877c08..aa1d1f5339d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
714 714
715 phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, 715 phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
716 priv->phy_iface); 716 priv->phy_iface);
717 if (IS_ERR(phydev)) 717 if (IS_ERR(phydev)) {
718 netdev_err(dev, "Could not attach to PHY\n"); 718 netdev_err(dev, "Could not attach to PHY\n");
719 phydev = NULL;
720 }
719 721
720 } else { 722 } else {
721 int ret; 723 int ret;
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index a90080f12e67..e548c0ae2e00 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -666,7 +666,7 @@ static int amd8111e_tx(struct net_device *dev)
 			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
 					 lp->tx_skbuff[tx_index]->len,
 					 PCI_DMA_TODEVICE);
-			dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
+			dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
 			lp->tx_skbuff[tx_index] = NULL;
 			lp->tx_dma_addr[tx_index] = 0;
 		}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 6a8e2567f2bd..4d3855ceb500 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -777,7 +777,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
 
 	if (bp->tx_bufs[bp->tx_empty]) {
 		++dev->stats.tx_packets;
-		dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
+		dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
 	}
 	bp->tx_bufs[bp->tx_empty] = NULL;
 	bp->tx_fullup = 0;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f44808959ff3..97ab0dd25552 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -638,7 +638,7 @@ static void b44_tx(struct b44 *bp)
 		bytes_compl += skb->len;
 		pkts_compl++;
 
-		dev_kfree_skb_irq(skb);
+		dev_consume_skb_irq(skb);
 	}
 
 	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
@@ -1012,7 +1012,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 
 		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
-		dev_kfree_skb_any(skb);
+		dev_consume_skb_any(skb);
 		skb = bounce_skb;
 	}
 
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 3d45f4c92cf6..9bbaad9f3d63 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -643,6 +643,7 @@
 #define MACB_CAPS_JUMBO				0x00000020
 #define MACB_CAPS_GEM_HAS_PTP			0x00000040
 #define MACB_CAPS_BD_RD_PREFETCH		0x00000080
+#define MACB_CAPS_NEEDS_RSTONUBR		0x00000100
 #define MACB_CAPS_FIFO_MODE			0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
@@ -1214,6 +1215,8 @@ struct macb {
 
 	int	rx_bd_rd_prefetch;
 	int	tx_bd_rd_prefetch;
+
+	u32	rx_intr_mask;
 };
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 66cc7927061a..2b2882615e8b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -56,8 +56,7 @@
 /* level of occupied TX descriptors under which we wake up TX process */
 #define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
 
-#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
-				 | MACB_BIT(ISR_ROVR))
+#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
 #define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
 					| MACB_BIT(ISR_RLE)		\
 					| MACB_BIT(TXERR))
@@ -1270,7 +1269,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
 			queue_writel(queue, ISR, MACB_BIT(RCOMP));
 			napi_reschedule(napi);
 		} else {
-			queue_writel(queue, IER, MACB_RX_INT_FLAGS);
+			queue_writel(queue, IER, bp->rx_intr_mask);
 		}
 	}
 
@@ -1288,7 +1287,7 @@ static void macb_hresp_error_task(unsigned long data)
 	u32 ctrl;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+		queue_writel(queue, IDR, bp->rx_intr_mask |
 					 MACB_TX_INT_FLAGS |
 					 MACB_BIT(HRESP));
 	}
@@ -1318,7 +1317,7 @@ static void macb_hresp_error_task(unsigned long data)
 
 		/* Enable interrupts */
 		queue_writel(queue, IER,
-			     MACB_RX_INT_FLAGS |
+			     bp->rx_intr_mask |
 			     MACB_TX_INT_FLAGS |
 			     MACB_BIT(HRESP));
 	}
@@ -1372,14 +1371,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 			    (unsigned int)(queue - bp->queues),
 			    (unsigned long)status);
 
-		if (status & MACB_RX_INT_FLAGS) {
+		if (status & bp->rx_intr_mask) {
 			/* There's no point taking any more interrupts
 			 * until we have processed the buffers. The
 			 * scheduling call may fail if the poll routine
 			 * is already scheduled, so disable interrupts
 			 * now.
 			 */
-			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
+			queue_writel(queue, IDR, bp->rx_intr_mask);
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 
@@ -1412,8 +1411,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 		/* There is a hardware issue under heavy load where DMA can
 		 * stop, this causes endless "used buffer descriptor read"
 		 * interrupts but it can be cleared by re-enabling RX. See
-		 * the at91 manual, section 41.3.1 or the Zynq manual
-		 * section 16.7.4 for details.
+		 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
+		 * section 16.7.4 for details. RXUBR is only enabled for
+		 * these two versions.
 		 */
 		if (status & MACB_BIT(RXUBR)) {
 			ctrl = macb_readl(bp, NCR);
@@ -2259,7 +2259,7 @@ static void macb_init_hw(struct macb *bp)
 
 		/* Enable interrupts */
 		queue_writel(queue, IER,
-			     MACB_RX_INT_FLAGS |
+			     bp->rx_intr_mask |
 			     MACB_TX_INT_FLAGS |
 			     MACB_BIT(HRESP));
 	}
@@ -3907,6 +3907,7 @@ static const struct macb_config sama5d4_config = {
 };
 
 static const struct macb_config emac_config = {
+	.caps = MACB_CAPS_NEEDS_RSTONUBR,
 	.clk_init = at91ether_clk_init,
 	.init = at91ether_init,
 };
@@ -3928,7 +3929,8 @@ static const struct macb_config zynqmp_config = {
 };
 
 static const struct macb_config zynq_config = {
-	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
+	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
+		MACB_CAPS_NEEDS_RSTONUBR,
 	.dma_burst_length = 16,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
@@ -4083,6 +4085,10 @@ static int macb_probe(struct platform_device *pdev)
 			macb_dma_desc_get_size(bp);
 	}
 
+	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
+	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
+		bp->rx_intr_mask |= MACB_BIT(RXUBR);
+
 	mac = of_get_mac_address(np);
 	if (mac) {
 		ether_addr_copy(bp->dev->dev_addr, mac);
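
The macb change replaces a fixed interrupt mask with one derived from per-SoC capability flags, so RXUBR is armed only where the reset-on-UBR workaround applies. A compact sketch of that pattern (bit values invented for illustration, not the driver's register layout):

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define RCOMP			BIT(1)
#define ISR_ROVR		BIT(2)
#define RXUBR			BIT(3)
#define CAPS_NEEDS_RSTONUBR	BIT(8)

int main(void)
{
	unsigned int caps = CAPS_NEEDS_RSTONUBR;	/* e.g. the zynq config */
	unsigned int rx_intr_mask = RCOMP | ISR_ROVR;	/* common RX events */

	if (caps & CAPS_NEEDS_RSTONUBR)
		rx_intr_mask |= RXUBR;	/* arm the workaround interrupt */

	printf("rx_intr_mask = 0x%x\n", rx_intr_mask);
	return 0;
}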
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index 809a155eb193..f6d244c663fd 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -9,8 +9,9 @@ config FSL_DPAA2_ETH
9 9
10config FSL_DPAA2_PTP_CLOCK 10config FSL_DPAA2_PTP_CLOCK
11 tristate "Freescale DPAA2 PTP Clock" 11 tristate "Freescale DPAA2 PTP Clock"
12 depends on FSL_DPAA2_ETH && POSIX_TIMERS 12 depends on FSL_DPAA2_ETH
13 select PTP_1588_CLOCK 13 imply PTP_1588_CLOCK
14 default y
14 help 15 help
15 This driver adds support for using the DPAA2 1588 timer module 16 This driver adds support for using the DPAA2 1588 timer module
16 as a PTP clock. 17 as a PTP clock.
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ae0f88bce9aa..2370dc204202 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3467,7 +3467,7 @@ fec_probe(struct platform_device *pdev)
3467 if (ret) 3467 if (ret)
3468 goto failed_clk_ipg; 3468 goto failed_clk_ipg;
3469 3469
3470 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3470 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
3471 if (!IS_ERR(fep->reg_phy)) { 3471 if (!IS_ERR(fep->reg_phy)) {
3472 ret = regulator_enable(fep->reg_phy); 3472 ret = regulator_enable(fep->reg_phy);
3473 if (ret) { 3473 if (ret) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 5b33238c6680..60e7d7ae3787 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2418,6 +2418,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
2418out_notify_fail: 2418out_notify_fail:
2419 (void)cancel_work_sync(&priv->service_task); 2419 (void)cancel_work_sync(&priv->service_task);
2420out_read_prop_fail: 2420out_read_prop_fail:
2421 /* safe for ACPI FW */
2422 of_node_put(to_of_node(priv->fwnode));
2421 free_netdev(ndev); 2423 free_netdev(ndev);
2422 return ret; 2424 return ret;
2423} 2425}
@@ -2447,6 +2449,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
 	set_bit(NIC_STATE_REMOVING, &priv->state);
 	(void)cancel_work_sync(&priv->service_task);
 
+	/* safe for ACPI FW */
+	of_node_put(to_of_node(priv->fwnode));
+
 	free_netdev(ndev);
 	return 0;
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 8e9b95871d30..ce15d2350db9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
  */
 static int hns_nic_nway_reset(struct net_device *netdev)
 {
-	int ret = 0;
 	struct phy_device *phy = netdev->phydev;
 
-	if (netif_running(netdev)) {
-		/* if autoneg is disabled, don't restart auto-negotiation */
-		if (phy && phy->autoneg == AUTONEG_ENABLE)
-			ret = genphy_restart_aneg(phy);
-	}
+	if (!netif_running(netdev))
+		return 0;
 
-	return ret;
+	if (!phy)
+		return -EOPNOTSUPP;
+
+	if (phy->autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+
+	return genphy_restart_aneg(phy);
 }
 
 static u32
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 017e08452d8c..baf5cc251f32 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
 	}
 
 		hns_mdio_cmd_write(mdio_dev, is_c45,
-				   MDIO_C45_WRITE_ADDR, phy_id, devad);
+				   MDIO_C45_READ, phy_id, devad);
 	}
 
 	/* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index d719668a6684..92929750f832 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1310,7 +1310,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 				dev->stats.tx_aborted_errors++;
 			}
 
-			dev_kfree_skb_irq(skb);
+			dev_consume_skb_irq(skb);
 
 			tx_cmd->cmd.command = 0; /* Mark free */
 			break;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 098d8764c0ea..dd71d5db7274 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1313,7 +1313,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 	unsigned long lpar_rc;
 	u16 mss = 0;
 
-restart_poll:
 	while (frames_processed < budget) {
 		if (!ibmveth_rxq_pending_buffer(adapter))
 			break;
@@ -1401,7 +1400,6 @@ restart_poll:
1401 napi_reschedule(napi)) { 1400 napi_reschedule(napi)) {
1402 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1401 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1403 VIO_IRQ_DISABLE); 1402 VIO_IRQ_DISABLE);
1404 goto restart_poll;
1405 } 1403 }
1406 } 1404 }
1407 1405
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index db909b6069b5..65f8a4b6ed0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -306,14 +306,16 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
306 306
307 if (entries_per_copy < entries) { 307 if (entries_per_copy < entries) {
308 for (i = 0; i < entries / entries_per_copy; i++) { 308 for (i = 0; i < entries / entries_per_copy; i++) {
309 err = copy_to_user(buf, init_ents, PAGE_SIZE); 309 err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ?
310 -EFAULT : 0;
310 if (err) 311 if (err)
311 goto out; 312 goto out;
312 313
313 buf += PAGE_SIZE; 314 buf += PAGE_SIZE;
314 } 315 }
315 } else { 316 } else {
316 err = copy_to_user(buf, init_ents, entries * cqe_size); 317 err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
318 -EFAULT : 0;
317 } 319 }
318 320
319out: 321out:
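
The conversion above exists because copy_to_user() does not return an errno: it returns the number of bytes left uncopied, so callers must fold any nonzero remainder into -EFAULT themselves (the cast also adds the __user annotation sparse expects). The required translation, as a minimal sketch:

#include <linux/uaccess.h>

static int example_fill_user(void __user *dst, const void *src, size_t len)
{
	/* copy_to_user() returns bytes NOT copied; any remainder means -EFAULT */
	if (copy_to_user(dst, src, len))
		return -EFAULT;
	return 0;
}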
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7df728f1e5b5..6e501af0e532 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2067,9 +2067,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2067{ 2067{
2068 struct mlx4_cmd_mailbox *mailbox; 2068 struct mlx4_cmd_mailbox *mailbox;
2069 __be32 *outbox; 2069 __be32 *outbox;
2070 u64 qword_field;
2070 u32 dword_field; 2071 u32 dword_field;
2071 int err; 2072 u16 word_field;
2072 u8 byte_field; 2073 u8 byte_field;
2074 int err;
2073 static const u8 a0_dmfs_query_hw_steering[] = { 2075 static const u8 a0_dmfs_query_hw_steering[] = {
2074 [0] = MLX4_STEERING_DMFS_A0_DEFAULT, 2076 [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
2075 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, 2077 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2097,19 +2099,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2097 2099
2098 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 2100 /* QPC/EEC/CQC/EQC/RDMARC attributes */
2099 2101
2100 MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); 2102 MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
2101 MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); 2103 param->qpc_base = qword_field & ~((u64)0x1f);
2102 MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); 2104 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
2103 MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); 2105 param->log_num_qps = byte_field & 0x1f;
2104 MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); 2106 MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
2105 MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); 2107 param->srqc_base = qword_field & ~((u64)0x1f);
2106 MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); 2108 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
2107 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); 2109 param->log_num_srqs = byte_field & 0x1f;
2108 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); 2110 MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
2109 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); 2111 param->cqc_base = qword_field & ~((u64)0x1f);
2110 MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); 2112 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
2111 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 2113 param->log_num_cqs = byte_field & 0x1f;
2112 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 2114 MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
2115 param->altc_base = qword_field;
2116 MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
2117 param->auxc_base = qword_field;
2118 MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
2119 param->eqc_base = qword_field & ~((u64)0x1f);
2120 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
2121 param->log_num_eqs = byte_field & 0x1f;
2122 MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
2123 param->num_sys_eqs = word_field & 0xfff;
2124 MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
2125 param->rdmarc_base = qword_field & ~((u64)0x1f);
2126 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
2127 param->log_rd_per_qp = byte_field & 0x7;
2113 2128
2114 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); 2129 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
2115 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { 2130 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2128,22 +2143,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2128 /* steering attributes */ 2143 /* steering attributes */
2129 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 2144 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2130 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 2145 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
2131 MLX4_GET(param->log_mc_entry_sz, outbox, 2146 MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
2132 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); 2147 param->log_mc_entry_sz = byte_field & 0x1f;
2133 MLX4_GET(param->log_mc_table_sz, outbox, 2148 MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
2134 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); 2149 param->log_mc_table_sz = byte_field & 0x1f;
2135 MLX4_GET(byte_field, outbox, 2150 MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
2136 INIT_HCA_FS_A0_OFFSET);
2137 param->dmfs_high_steer_mode = 2151 param->dmfs_high_steer_mode =
2138 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; 2152 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
2139 } else { 2153 } else {
2140 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); 2154 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
2141 MLX4_GET(param->log_mc_entry_sz, outbox, 2155 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
2142 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); 2156 param->log_mc_entry_sz = byte_field & 0x1f;
2143 MLX4_GET(param->log_mc_hash_sz, outbox, 2157 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
2144 INIT_HCA_LOG_MC_HASH_SZ_OFFSET); 2158 param->log_mc_hash_sz = byte_field & 0x1f;
2145 MLX4_GET(param->log_mc_table_sz, outbox, 2159 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
2146 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); 2160 param->log_mc_table_sz = byte_field & 0x1f;
2147 } 2161 }
2148 2162
2149 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ 2163 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2167,15 +2181,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
2167 /* TPT attributes */ 2181 /* TPT attributes */
2168 2182
2169 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); 2183 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
2170 MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); 2184 MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
2171 MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); 2185 param->mw_enabled = byte_field >> 7;
2186 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
2187 param->log_mpt_sz = byte_field & 0x3f;
2172 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); 2188 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
2173 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); 2189 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
2174 2190
2175 /* UAR attributes */ 2191 /* UAR attributes */
2176 2192
2177 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); 2193 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
2178 MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); 2194 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
2195 param->log_uar_sz = byte_field & 0xf;
2179 2196
2180 /* phv_check enable */ 2197 /* phv_check enable */
2181 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); 2198 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8cfd2ec7c0a2..01819e5c9975 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -950,7 +950,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
950 if (params->rx_dim_enabled) 950 if (params->rx_dim_enabled)
951 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); 951 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
952 952
953 if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) 953 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
954 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); 954 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
955 955
956 return 0; 956 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 04736212a21c..f2573c2d2b5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1126,9 +1126,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
1126 struct mlx5e_priv *priv = netdev_priv(dev); 1126 struct mlx5e_priv *priv = netdev_priv(dev);
1127 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1127 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1128 struct mlx5_eswitch_rep *rep = rpriv->rep; 1128 struct mlx5_eswitch_rep *rep = rpriv->rep;
1129 int ret; 1129 int ret, pf_num;
1130
1131 ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
1132 if (ret)
1133 return ret;
1134
1135 if (rep->vport == FDB_UPLINK_VPORT)
1136 ret = snprintf(buf, len, "p%d", pf_num);
1137 else
1138 ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
1130 1139
1131 ret = snprintf(buf, len, "%d", rep->vport - 1);
1132 if (ret >= len) 1140 if (ret >= len)
1133 return -EOPNOTSUPP; 1141 return -EOPNOTSUPP;
1134 1142
@@ -1285,6 +1293,18 @@ static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
1285 return 0; 1293 return 0;
1286} 1294}
1287 1295
1296static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
1297 __be16 vlan_proto)
1298{
1299 netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
1300
1301 if (vlan != 0)
1302 return -EOPNOTSUPP;
1303
1304 /* allow setting 0-vid for compatibility with libvirt */
1305 return 0;
1306}
1307
1288static const struct switchdev_ops mlx5e_rep_switchdev_ops = { 1308static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
1289 .switchdev_port_attr_get = mlx5e_attr_get, 1309 .switchdev_port_attr_get = mlx5e_attr_get,
1290}; 1310};
@@ -1319,6 +1339,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
1319 .ndo_set_vf_rate = mlx5e_set_vf_rate, 1339 .ndo_set_vf_rate = mlx5e_set_vf_rate,
1320 .ndo_get_vf_config = mlx5e_get_vf_config, 1340 .ndo_get_vf_config = mlx5e_get_vf_config,
1321 .ndo_get_vf_stats = mlx5e_get_vf_stats, 1341 .ndo_get_vf_stats = mlx5e_get_vf_stats,
1342 .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
1322}; 1343};
1323 1344
1324bool mlx5e_eswitch_rep(struct net_device *netdev) 1345bool mlx5e_eswitch_rep(struct net_device *netdev)
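
Two notes on the en_rep.c changes: the new phys_port_name encodes the physical function obtained from the lag helper, and the retained ret >= len check works because snprintf() returns the length the string would have had, making it the standard truncation test. A sketch (the error-code choice mirrors the driver):

#include <linux/errno.h>
#include <linux/kernel.h>

static int format_port_name(char *buf, size_t len, int pf, int vf)
{
	int ret = snprintf(buf, len, "pf%dvf%d", pf, vf);

	if (ret < 0 || ret >= len)	/* ret >= len: output was truncated */
		return -EOPNOTSUPP;
	return 0;
}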
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index a44ea7b85614..5b492b67f4e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1134,13 +1134,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
1134 int err = 0; 1134 int err = 0;
1135 u8 *smac_v; 1135 u8 *smac_v;
1136 1136
1137 if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
1138 mlx5_core_warn(esw->dev,
1139 "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
1140 vport->vport);
1141 return -EPERM;
1142 }
1143
1144 esw_vport_cleanup_ingress_rules(esw, vport); 1137 esw_vport_cleanup_ingress_rules(esw, vport);
1145 1138
1146 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { 1139 if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1728,7 +1721,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1728 int vport_num; 1721 int vport_num;
1729 int err; 1722 int err;
1730 1723
1731 if (!MLX5_ESWITCH_MANAGER(dev)) 1724 if (!MLX5_VPORT_MANAGER(dev))
1732 return 0; 1725 return 0;
1733 1726
1734 esw_info(dev, 1727 esw_info(dev,
@@ -1797,7 +1790,7 @@ abort:
1797 1790
1798void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1791void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1799{ 1792{
1800 if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) 1793 if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
1801 return; 1794 return;
1802 1795
1803 esw_info(esw->dev, "cleanup\n"); 1796 esw_info(esw->dev, "cleanup\n");
@@ -1827,13 +1820,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1827 mutex_lock(&esw->state_lock); 1820 mutex_lock(&esw->state_lock);
1828 evport = &esw->vports[vport]; 1821 evport = &esw->vports[vport];
1829 1822
1830 if (evport->info.spoofchk && !is_valid_ether_addr(mac)) { 1823 if (evport->info.spoofchk && !is_valid_ether_addr(mac))
1831 mlx5_core_warn(esw->dev, 1824 mlx5_core_warn(esw->dev,
1832 "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", 1825 "Set invalid MAC while spoofchk is on, vport(%d)\n",
1833 vport); 1826 vport);
1834 err = -EPERM;
1835 goto unlock;
1836 }
1837 1827
1838 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); 1828 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
1839 if (err) { 1829 if (err) {
@@ -1979,6 +1969,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
1979 evport = &esw->vports[vport]; 1969 evport = &esw->vports[vport];
1980 pschk = evport->info.spoofchk; 1970 pschk = evport->info.spoofchk;
1981 evport->info.spoofchk = spoofchk; 1971 evport->info.spoofchk = spoofchk;
1972 if (pschk && !is_valid_ether_addr(evport->info.mac))
1973 mlx5_core_warn(esw->dev,
1974 "Spoofchk in set while MAC is invalid, vport(%d)\n",
1975 evport->vport);
1982 if (evport->enabled && esw->mode == SRIOV_LEGACY) 1976 if (evport->enabled && esw->mode == SRIOV_LEGACY)
1983 err = esw_vport_ingress_config(esw, evport); 1977 err = esw_vport_ingress_config(esw, evport);
1984 if (err) 1978 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 3a6baed722d8..2d223385dc81 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -616,6 +616,27 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
616 } 616 }
617} 617}
618 618
619int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
620{
621 struct mlx5_lag *ldev;
622 int n;
623
624 ldev = mlx5_lag_dev_get(dev);
625 if (!ldev) {
626 mlx5_core_warn(dev, "no lag device, can't get pf num\n");
627 return -EINVAL;
628 }
629
630 for (n = 0; n < MLX5_MAX_PORTS; n++)
631 if (ldev->pf[n].dev == dev) {
632 *pf_num = n;
633 return 0;
634 }
635
636 mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
637 return -EINVAL;
638}
639
619/* Must be called with intf_mutex held */ 640/* Must be called with intf_mutex held */
620void mlx5_lag_remove(struct mlx5_core_dev *dev) 641void mlx5_lag_remove(struct mlx5_core_dev *dev)
621{ 642{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index c68dcea5985b..5300b0b6d836 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -187,6 +187,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
187 MLX5_CAP_GEN(dev, lag_master); 187 MLX5_CAP_GEN(dev, lag_master);
188} 188}
189 189
190int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num);
191
190void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); 192void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
191void mlx5_lag_update(struct mlx5_core_dev *dev); 193void mlx5_lag_update(struct mlx5_core_dev *dev);
192 194
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 388f205a497f..370ca94b6775 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common *
44mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) 44mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
45{ 45{
46 struct mlx5_core_rsc_common *common; 46 struct mlx5_core_rsc_common *common;
47 unsigned long flags;
47 48
48 spin_lock(&table->lock); 49 spin_lock_irqsave(&table->lock, flags);
49 50
50 common = radix_tree_lookup(&table->tree, rsn); 51 common = radix_tree_lookup(&table->tree, rsn);
51 if (common) 52 if (common)
52 atomic_inc(&common->refcount); 53 atomic_inc(&common->refcount);
53 54
54 spin_unlock(&table->lock); 55 spin_unlock_irqrestore(&table->lock, flags);
55 56
56 return common; 57 return common;
57} 58}
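
The irqsave conversion above is the classic fix for a lock that is also taken from interrupt context: if an IRQ fires on a CPU already holding the plain spin_lock(), the handler spins on it forever. spin_lock_irqsave() disables local interrupts for the critical section and restores the previous state afterwards. A minimal sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Also acquired from hard-IRQ context elsewhere, so local interrupts
 * must be off while held to avoid self-deadlock.
 */
static void example_lookup(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... radix tree lookup + refcount bump ... */
	spin_unlock_irqrestore(&example_lock, flags);
}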
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 8f6551421945..2ecaaaa4469a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -795,19 +795,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
795 795
796/* get pq index according to PQ_FLAGS */ 796/* get pq index according to PQ_FLAGS */
797static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, 797static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
798 u32 pq_flags) 798 unsigned long pq_flags)
799{ 799{
800 struct qed_qm_info *qm_info = &p_hwfn->qm_info; 800 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
801 801
802 /* Can't have multiple flags set here */ 802 /* Can't have multiple flags set here */
803 if (bitmap_weight((unsigned long *)&pq_flags, 803 if (bitmap_weight(&pq_flags,
804 sizeof(pq_flags) * BITS_PER_BYTE) > 1) { 804 sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
805 DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); 805 DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
806 goto err; 806 goto err;
807 } 807 }
808 808
809 if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { 809 if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
810 DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); 810 DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
811 goto err; 811 goto err;
812 } 812 }
813 813
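
Widening pq_flags to unsigned long fixes more than the format strings: bitmap helpers operate on unsigned long words, so casting a &u32 to unsigned long * made bitmap_weight() perform a full-word load from a 4-byte object on 64-bit machines (an out-of-bounds read, and the wrong bytes entirely on big-endian). A sketch of the safe form:

#include <linux/bitmap.h>
#include <linux/bits.h>

/* Give bitmap helpers a genuine unsigned long; never cast a narrower
 * object, since the first-word load is always BITS_PER_LONG wide.
 */
static bool exactly_one_flag(unsigned long flags)
{
	return bitmap_weight(&flags, BITS_PER_LONG) == 1;
}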
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 67c02ea93906..e68ca83ae915 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
609 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && 609 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
610 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); 610 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
611 611
612 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
613 (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
614 !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
615
612 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, 616 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
613 !!(accept_filter & QED_ACCEPT_BCAST)); 617 !!(accept_filter & QED_ACCEPT_BCAST));
614 618
@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
744 return rc; 748 return rc;
745 } 749 }
746 750
751 if (p_params->update_ctl_frame_check) {
752 p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
753 p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
754 }
755
747 /* Update mcast bins for VFs, PF doesn't use this functionality */ 756 /* Update mcast bins for VFs, PF doesn't use this functionality */
748 qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); 757 qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
749 758
@@ -2688,7 +2697,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2688 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 2697 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2689 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | 2698 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2690 QED_ACCEPT_MCAST_UNMATCHED; 2699 QED_ACCEPT_MCAST_UNMATCHED;
2691 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2700 accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2701 QED_ACCEPT_MCAST_UNMATCHED;
2692 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 2702 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2693 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2703 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2694 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; 2704 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8d80f1095d17..7127d5aaac42 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
219 struct qed_rss_params *rss_params; 219 struct qed_rss_params *rss_params;
220 struct qed_filter_accept_flags accept_flags; 220 struct qed_filter_accept_flags accept_flags;
221 struct qed_sge_tpa_params *sge_tpa_params; 221 struct qed_sge_tpa_params *sge_tpa_params;
222 u8 update_ctl_frame_check;
223 u8 mac_chk_en;
224 u8 ethtype_chk_en;
222}; 225};
223 226
224int qed_sp_vport_update(struct qed_hwfn *p_hwfn, 227int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index d9237c65a838..b5f419b71287 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2451,19 +2451,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2451{ 2451{
2452 struct qed_ll2_tx_pkt_info pkt; 2452 struct qed_ll2_tx_pkt_info pkt;
2453 const skb_frag_t *frag; 2453 const skb_frag_t *frag;
2454 u8 flags = 0, nr_frags;
2454 int rc = -EINVAL, i; 2455 int rc = -EINVAL, i;
2455 dma_addr_t mapping; 2456 dma_addr_t mapping;
2456 u16 vlan = 0; 2457 u16 vlan = 0;
2457 u8 flags = 0;
2458 2458
2459 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { 2459 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2460 DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); 2460 DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2461 return -EINVAL; 2461 return -EINVAL;
2462 } 2462 }
2463 2463
2464 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { 2464 /* Cache number of fragments from SKB since SKB may be freed by
2465 * the completion routine after calling qed_ll2_prepare_tx_packet()
2466 */
2467 nr_frags = skb_shinfo(skb)->nr_frags;
2468
2469 if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2465 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", 2470 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2466 1 + skb_shinfo(skb)->nr_frags); 2471 1 + nr_frags);
2467 return -EINVAL; 2472 return -EINVAL;
2468 } 2473 }
2469 2474
@@ -2485,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2485 } 2490 }
2486 2491
2487 memset(&pkt, 0, sizeof(pkt)); 2492 memset(&pkt, 0, sizeof(pkt));
2488 pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; 2493 pkt.num_of_bds = 1 + nr_frags;
2489 pkt.vlan = vlan; 2494 pkt.vlan = vlan;
2490 pkt.bd_flags = flags; 2495 pkt.bd_flags = flags;
2491 pkt.tx_dest = QED_LL2_TX_DEST_NW; 2496 pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2496,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2496 test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) 2501 test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
2497 pkt.remove_stag = true; 2502 pkt.remove_stag = true;
2498 2503
2504 /* qed_ll2_prepare_tx_packet() may actually send the packet if
2505 * there are no fragments in the skb and subsequently the completion
2506 * routine may run and free the SKB, so no dereferencing the SKB
2507 * beyond this point unless skb has any fragments.
2508 */
2499 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, 2509 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2500 &pkt, 1); 2510 &pkt, 1);
2501 if (rc) 2511 if (rc)
2502 goto err; 2512 goto err;
2503 2513
2504 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2514 for (i = 0; i < nr_frags; i++) {
2505 frag = &skb_shinfo(skb)->frags[i]; 2515 frag = &skb_shinfo(skb)->frags[i];
2506 2516
2507 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, 2517 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
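
The nr_frags caching above guards a subtle lifetime rule spelled out in the new comments: posting the first BD can complete, and free, the skb before qed_ll2_prepare_tx_packet() even returns. A condensed sketch of the snapshot-before-handoff pattern; the example_* names are assumptions:

#include <linux/skbuff.h>

struct example_dev;	/* assumed */
int example_post_first_bd(struct example_dev *d, struct sk_buff *skb, u8 nbds);
void example_post_frag_bd(struct example_dev *d, const skb_frag_t *frag);

static int example_xmit(struct example_dev *d, struct sk_buff *skb)
{
	u8 nr_frags = skb_shinfo(skb)->nr_frags;	/* snapshot before handoff */
	int i, rc;

	rc = example_post_first_bd(d, skb, 1 + nr_frags);
	if (rc)
		return rc;

	/* The skb is only guaranteed alive here because nr_frags > 0 makes
	 * the hardware wait for the remaining BDs; the loop bound uses the
	 * cached value, never skb_shinfo(skb)->nr_frags again.
	 */
	for (i = 0; i < nr_frags; i++)
		example_post_frag_bd(d, &skb_shinfo(skb)->frags[i]);

	return 0;
}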
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ca6290fa0f30..71a7af134dd8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
1969 params.vport_id = vf->vport_id; 1969 params.vport_id = vf->vport_id;
1970 params.max_buffers_per_cqe = start->max_buffers_per_cqe; 1970 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1971 params.mtu = vf->mtu; 1971 params.mtu = vf->mtu;
1972 params.check_mac = true; 1972
1973 /* Non trusted VFs should enable control frame filtering */
1974 params.check_mac = !vf->p_vf_info.is_trusted_configured;
1973 1975
1974 rc = qed_sp_eth_vport_start(p_hwfn, &params); 1976 rc = qed_sp_eth_vport_start(p_hwfn, &params);
1975 if (rc) { 1977 if (rc) {
@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5130 params.opaque_fid = vf->opaque_fid; 5132 params.opaque_fid = vf->opaque_fid;
5131 params.vport_id = vf->vport_id; 5133 params.vport_id = vf->vport_id;
5132 5134
5135 params.update_ctl_frame_check = 1;
5136 params.mac_chk_en = !vf_info->is_trusted_configured;
5137
5133 if (vf_info->rx_accept_mode & mask) { 5138 if (vf_info->rx_accept_mode & mask) {
5134 flags->update_rx_mode_config = 1; 5139 flags->update_rx_mode_config = 1;
5135 flags->rx_accept_filter = vf_info->rx_accept_mode; 5140 flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5147 } 5152 }
5148 5153
5149 if (flags->update_rx_mode_config || 5154 if (flags->update_rx_mode_config ||
5150 flags->update_tx_mode_config) 5155 flags->update_tx_mode_config ||
5156 params.update_ctl_frame_check)
5151 qed_sp_vport_update(hwfn, &params, 5157 qed_sp_vport_update(hwfn, &params,
5152 QED_SPQ_MODE_EBLOCK, NULL); 5158 QED_SPQ_MODE_EBLOCK, NULL);
5153 } 5159 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index b6cccf44bf40..5dda547772c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
261 struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; 261 struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
262 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; 262 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
263 struct vf_pf_resc_request *p_resc; 263 struct vf_pf_resc_request *p_resc;
264 u8 retry_cnt = VF_ACQUIRE_THRESH;
264 bool resources_acquired = false; 265 bool resources_acquired = false;
265 struct vfpf_acquire_tlv *req; 266 struct vfpf_acquire_tlv *req;
266 int rc = 0, attempts = 0; 267 int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
314 315
315 /* send acquire request */ 316 /* send acquire request */
316 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 317 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
318
319 /* Re-try acquire in case of vf-pf hw channel timeout */
320 if (retry_cnt && rc == -EBUSY) {
321 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
322 "VF retrying to acquire due to VPC timeout\n");
323 retry_cnt--;
324 continue;
325 }
326
317 if (rc) 327 if (rc)
318 goto exit; 328 goto exit;
319 329
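
The retry added above treats -EBUSY from the vf-pf channel as transient rather than fatal. Stripped to its shape, it is a bounded retry loop; the names and retry count here are stand-ins for the driver's VF_ACQUIRE_THRESH:

#include <linux/errno.h>
#include <linux/types.h>

#define ACQUIRE_RETRIES 3	/* stand-in for VF_ACQUIRE_THRESH */

struct example_hwfn;					/* assumed */
int example_send_acquire(struct example_hwfn *hwfn);	/* assumed */

static int example_acquire(struct example_hwfn *hwfn)
{
	u8 retries = ACQUIRE_RETRIES;
	int rc;

	do {	/* retry only the transient channel timeout, nothing else */
		rc = example_send_acquire(hwfn);
	} while (rc == -EBUSY && retries--);

	return rc;
}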
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 44f6e4873aad..4f910c4f67b0 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -691,7 +691,7 @@ static void cp_tx (struct cp_private *cp)
691 } 691 }
692 bytes_compl += skb->len; 692 bytes_compl += skb->len;
693 pkts_compl++; 693 pkts_compl++;
694 dev_kfree_skb_irq(skb); 694 dev_consume_skb_irq(skb);
695 } 695 }
696 696
697 cp->tx_skb[tx_tail] = NULL; 697 cp->tx_skb[tx_tail] = NULL;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ffc1ada4e6da..d28c8f9ca55b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -343,7 +343,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
343 int i; 343 int i;
344 344
345 priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) + 345 priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
346 ETH_HLEN + VLAN_HLEN; 346 ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
347 347
348 /* Allocate RX and TX skb rings */ 348 /* Allocate RX and TX skb rings */
349 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], 349 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -524,13 +524,15 @@ static void ravb_rx_csum(struct sk_buff *skb)
524{ 524{
525 u8 *hw_csum; 525 u8 *hw_csum;
526 526
527 /* The hardware checksum is 2 bytes appended to packet data */ 527 /* The hardware checksum is contained in sizeof(__sum16) (2) bytes
528 if (unlikely(skb->len < 2)) 528 * appended to packet data
529 */
530 if (unlikely(skb->len < sizeof(__sum16)))
529 return; 531 return;
530 hw_csum = skb_tail_pointer(skb) - 2; 532 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
531 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); 533 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
532 skb->ip_summed = CHECKSUM_COMPLETE; 534 skb->ip_summed = CHECKSUM_COMPLETE;
533 skb_trim(skb, skb->len - 2); 535 skb_trim(skb, skb->len - sizeof(__sum16));
534} 536}
535 537
536/* Packet receive function for Ethernet AVB */ 538/* Packet receive function for Ethernet AVB */
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index b6a50058bb8d..2f2bda68d861 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6046,22 +6046,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
6046 { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, 6046 { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
6047 { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" } 6047 { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }
6048}; 6048};
6049#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types)
6049 6050
6050static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, 6051static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6051 struct efx_mcdi_mtd_partition *part, 6052 struct efx_mcdi_mtd_partition *part,
6052 unsigned int type) 6053 unsigned int type,
6054 unsigned long *found)
6053{ 6055{
6054 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); 6056 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
6055 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); 6057 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
6056 const struct efx_ef10_nvram_type_info *info; 6058 const struct efx_ef10_nvram_type_info *info;
6057 size_t size, erase_size, outlen; 6059 size_t size, erase_size, outlen;
6060 int type_idx = 0;
6058 bool protected; 6061 bool protected;
6059 int rc; 6062 int rc;
6060 6063
6061 for (info = efx_ef10_nvram_types; ; info++) { 6064 for (type_idx = 0; ; type_idx++) {
6062 if (info == 6065 if (type_idx == EF10_NVRAM_PARTITION_COUNT)
6063 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
6064 return -ENODEV; 6066 return -ENODEV;
6067 info = efx_ef10_nvram_types + type_idx;
6065 if ((type & ~info->type_mask) == info->type) 6068 if ((type & ~info->type_mask) == info->type)
6066 break; 6069 break;
6067 } 6070 }
@@ -6074,6 +6077,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6074 if (protected) 6077 if (protected)
6075 return -ENODEV; /* hide it */ 6078 return -ENODEV; /* hide it */
6076 6079
6080 /* If we've already exposed a partition of this type, hide this
6081 * duplicate. All operations on MTDs are keyed by the type anyway,
6082 * so we can't act on the duplicate.
6083 */
6084 if (__test_and_set_bit(type_idx, found))
6085 return -EEXIST;
6086
6077 part->nvram_type = type; 6087 part->nvram_type = type;
6078 6088
6079 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); 6089 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
@@ -6105,6 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
6105static int efx_ef10_mtd_probe(struct efx_nic *efx) 6115static int efx_ef10_mtd_probe(struct efx_nic *efx)
6106{ 6116{
6107 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); 6117 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
6118 DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT);
6108 struct efx_mcdi_mtd_partition *parts; 6119 struct efx_mcdi_mtd_partition *parts;
6109 size_t outlen, n_parts_total, i, n_parts; 6120 size_t outlen, n_parts_total, i, n_parts;
6110 unsigned int type; 6121 unsigned int type;
@@ -6133,11 +6144,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
6133 for (i = 0; i < n_parts_total; i++) { 6144 for (i = 0; i < n_parts_total; i++) {
6134 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, 6145 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
6135 i); 6146 i);
6136 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); 6147 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
6137 if (rc == 0) 6148 found);
6138 n_parts++; 6149 if (rc == -EEXIST || rc == -ENODEV)
6139 else if (rc != -ENODEV) 6150 continue;
6151 if (rc)
6140 goto fail; 6152 goto fail;
6153 n_parts++;
6141 } 6154 }
6142 6155
6143 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); 6156 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
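
The duplicate suppression above keys a fixed-size bitmap by the index of the matched entry in efx_ef10_nvram_types, so a second partition of an already-exposed type is detected in O(1) with __test_and_set_bit() and skipped as -EEXIST. A sketch of the idiom; N_TYPES is an assumption, and note that a stack bitmap needs explicit zeroing (bitmap_zero() or an initializer):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define N_TYPES 16	/* assumed table size */

static int example_claim_type(unsigned long *found, int type_idx)
{
	if (__test_and_set_bit(type_idx, found))
		return -EEXIST;		/* duplicate: hide it */
	return 0;
}

static void example_probe(void)
{
	DECLARE_BITMAP(found, N_TYPES);

	bitmap_zero(found, N_TYPES);	/* stack bitmaps are not zeroed implicitly */
	/* ... for each discovered partition: example_claim_type(found, idx) ... */
}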
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7b923362ee55..3b174eae77c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
1342 } 1342 }
1343 1343
1344 ret = phy_power_on(bsp_priv, true); 1344 ret = phy_power_on(bsp_priv, true);
1345 if (ret) 1345 if (ret) {
1346 gmac_clk_enable(bsp_priv, false);
1346 return ret; 1347 return ret;
1348 }
1347 1349
1348 pm_runtime_enable(dev); 1350 pm_runtime_enable(dev);
1349 pm_runtime_get_sync(dev); 1351 pm_runtime_get_sync(dev);
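
The two added lines restore the unwind rule for rk_gmac_powerup(): a failure exit must release everything the earlier steps acquired, in reverse order, or the clocks enabled just above stay on forever. The general shape, with assumed helpers:

struct example_priv;				/* assumed */
int example_clk_enable(struct example_priv *p);	/* assumed */
void example_clk_disable(struct example_priv *p);
int example_phy_power_on(struct example_priv *p);

static int example_powerup(struct example_priv *p)
{
	int ret;

	ret = example_clk_enable(p);
	if (ret)
		return ret;

	ret = example_phy_power_on(p);
	if (ret) {
		example_clk_disable(p);	/* undo step 1 before bailing out */
		return ret;
	}
	return 0;
}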
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 9020b084b953..7ec4eb74fe21 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1,22 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/* cassini.c: Sun Microsystems Cassini(+) ethernet driver. 2/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
3 * 3 *
4 * Copyright (C) 2004 Sun Microsystems Inc. 4 * Copyright (C) 2004 Sun Microsystems Inc.
5 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) 5 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
6 * 6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 *
20 * This driver uses the sungem driver (c) David Miller 7 * This driver uses the sungem driver (c) David Miller
21 * (davem@redhat.com) as its basis. 8 * (davem@redhat.com) as its basis.
22 * 9 *
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index 13f3860496a8..ae5f05f03f88 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -1,23 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0+ */
2/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ 2/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
3 * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. 3 * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
4 * 4 *
5 * Copyright (C) 2004 Sun Microsystems Inc. 5 * Copyright (C) 2004 Sun Microsystems Inc.
6 * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) 6 * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see <http://www.gnu.org/licenses/>.
20 *
21 * vendor id: 0x108E (Sun Microsystems, Inc.) 8 * vendor id: 0x108E (Sun Microsystems, Inc.)
22 * device id: 0xabba (Cassini) 9 * device id: 0xabba (Cassini)
23 * revision ids: 0x01 = Cassini 10 * revision ids: 0x01 = Cassini
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 810dfc7de1f9..e2d47b24a869 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -608,7 +608,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
608 netdev_dbg(dev, "sent 0x%p, len=%d\n", 608 netdev_dbg(dev, "sent 0x%p, len=%d\n",
609 desc->skb, desc->skb->len); 609 desc->skb, desc->skb->len);
610 610
611 dev_kfree_skb_irq(desc->skb); 611 dev_consume_skb_irq(desc->skb);
612 desc->skb = NULL; 612 desc->skb = NULL;
613 if (__netif_subqueue_stopped(dev, queue)) 613 if (__netif_subqueue_stopped(dev, queue))
614 netif_wake_subqueue(dev, queue); 614 netif_wake_subqueue(dev, queue);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index ef6f766f6389..e859ae2e42d5 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -144,6 +144,8 @@ struct hv_netvsc_packet {
144 u32 total_data_buflen; 144 u32 total_data_buflen;
145}; 145};
146 146
147#define NETVSC_HASH_KEYLEN 40
148
147struct netvsc_device_info { 149struct netvsc_device_info {
148 unsigned char mac_adr[ETH_ALEN]; 150 unsigned char mac_adr[ETH_ALEN];
149 u32 num_chn; 151 u32 num_chn;
@@ -151,6 +153,8 @@ struct netvsc_device_info {
151 u32 recv_sections; 153 u32 recv_sections;
152 u32 send_section_size; 154 u32 send_section_size;
153 u32 recv_section_size; 155 u32 recv_section_size;
156
157 u8 rss_key[NETVSC_HASH_KEYLEN];
154}; 158};
155 159
156enum rndis_device_state { 160enum rndis_device_state {
@@ -160,8 +164,6 @@ enum rndis_device_state {
160 RNDIS_DEV_DATAINITIALIZED, 164 RNDIS_DEV_DATAINITIALIZED,
161}; 165};
162 166
163#define NETVSC_HASH_KEYLEN 40
164
165struct rndis_device { 167struct rndis_device {
166 struct net_device *ndev; 168 struct net_device *ndev;
167 169
@@ -209,7 +211,9 @@ int netvsc_recv_callback(struct net_device *net,
209void netvsc_channel_cb(void *context); 211void netvsc_channel_cb(void *context);
210int netvsc_poll(struct napi_struct *napi, int budget); 212int netvsc_poll(struct napi_struct *napi, int budget);
211 213
212int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev); 214int rndis_set_subchannel(struct net_device *ndev,
215 struct netvsc_device *nvdev,
216 struct netvsc_device_info *dev_info);
213int rndis_filter_open(struct netvsc_device *nvdev); 217int rndis_filter_open(struct netvsc_device *nvdev);
214int rndis_filter_close(struct netvsc_device *nvdev); 218int rndis_filter_close(struct netvsc_device *nvdev);
215struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, 219struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
@@ -1177,7 +1181,7 @@ enum ndis_per_pkt_info_type {
1177 1181
1178enum rndis_per_pkt_info_interal_type { 1182enum rndis_per_pkt_info_interal_type {
1179 RNDIS_PKTINFO_ID = 1, 1183 RNDIS_PKTINFO_ID = 1,
1180 /* Add more memebers here */ 1184 /* Add more members here */
1181 1185
1182 RNDIS_PKTINFO_MAX 1186 RNDIS_PKTINFO_MAX
1183}; 1187};
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 922054c1d544..813d195bbd57 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w)
84 84
85 rdev = nvdev->extension; 85 rdev = nvdev->extension;
86 if (rdev) { 86 if (rdev) {
87 ret = rndis_set_subchannel(rdev->ndev, nvdev); 87 ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
88 if (ret == 0) { 88 if (ret == 0) {
89 netif_device_attach(rdev->ndev); 89 netif_device_attach(rdev->ndev);
90 } else { 90 } else {
@@ -1331,7 +1331,7 @@ void netvsc_channel_cb(void *context)
1331 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); 1331 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1332 1332
1333 if (napi_schedule_prep(&nvchan->napi)) { 1333 if (napi_schedule_prep(&nvchan->napi)) {
1334 /* disable interupts from host */ 1334 /* disable interrupts from host */
1335 hv_begin_read(rbi); 1335 hv_begin_read(rbi);
1336 1336
1337 __napi_schedule_irqoff(&nvchan->napi); 1337 __napi_schedule_irqoff(&nvchan->napi);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 91ed15ea5883..256adbd044f5 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -370,7 +370,7 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
370{ 370{
371 int j = 0; 371 int j = 0;
372 372
373 /* Deal with compund pages by ignoring unused part 373 /* Deal with compound pages by ignoring unused part
374 * of the page. 374 * of the page.
375 */ 375 */
376 page += (offset >> PAGE_SHIFT); 376 page += (offset >> PAGE_SHIFT);
@@ -858,6 +858,39 @@ static void netvsc_get_channels(struct net_device *net,
858 } 858 }
859} 859}
860 860
861/* Alloc struct netvsc_device_info, and initialize it from either existing
862 * struct netvsc_device, or from default values.
863 */
864static struct netvsc_device_info *netvsc_devinfo_get
865 (struct netvsc_device *nvdev)
866{
867 struct netvsc_device_info *dev_info;
868
869 dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
870
871 if (!dev_info)
872 return NULL;
873
874 if (nvdev) {
875 dev_info->num_chn = nvdev->num_chn;
876 dev_info->send_sections = nvdev->send_section_cnt;
877 dev_info->send_section_size = nvdev->send_section_size;
878 dev_info->recv_sections = nvdev->recv_section_cnt;
879 dev_info->recv_section_size = nvdev->recv_section_size;
880
881 memcpy(dev_info->rss_key, nvdev->extension->rss_key,
882 NETVSC_HASH_KEYLEN);
883 } else {
884 dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
885 dev_info->send_sections = NETVSC_DEFAULT_TX;
886 dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
887 dev_info->recv_sections = NETVSC_DEFAULT_RX;
888 dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
889 }
890
891 return dev_info;
892}
893
861static int netvsc_detach(struct net_device *ndev, 894static int netvsc_detach(struct net_device *ndev,
862 struct netvsc_device *nvdev) 895 struct netvsc_device *nvdev)
863{ 896{
@@ -909,7 +942,7 @@ static int netvsc_attach(struct net_device *ndev,
909 return PTR_ERR(nvdev); 942 return PTR_ERR(nvdev);
910 943
911 if (nvdev->num_chn > 1) { 944 if (nvdev->num_chn > 1) {
912 ret = rndis_set_subchannel(ndev, nvdev); 945 ret = rndis_set_subchannel(ndev, nvdev, dev_info);
913 946
914 /* if unavailable, just proceed with one queue */ 947 /* if unavailable, just proceed with one queue */
915 if (ret) { 948 if (ret) {
@@ -943,7 +976,7 @@ static int netvsc_set_channels(struct net_device *net,
943 struct net_device_context *net_device_ctx = netdev_priv(net); 976 struct net_device_context *net_device_ctx = netdev_priv(net);
944 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 977 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
945 unsigned int orig, count = channels->combined_count; 978 unsigned int orig, count = channels->combined_count;
946 struct netvsc_device_info device_info; 979 struct netvsc_device_info *device_info;
947 int ret; 980 int ret;
948 981
949 /* We do not support separate count for rx, tx, or other */ 982 /* We do not support separate count for rx, tx, or other */
@@ -962,24 +995,26 @@ static int netvsc_set_channels(struct net_device *net,
962 995
963 orig = nvdev->num_chn; 996 orig = nvdev->num_chn;
964 997
965 memset(&device_info, 0, sizeof(device_info)); 998 device_info = netvsc_devinfo_get(nvdev);
966 device_info.num_chn = count; 999
967 device_info.send_sections = nvdev->send_section_cnt; 1000 if (!device_info)
968 device_info.send_section_size = nvdev->send_section_size; 1001 return -ENOMEM;
969 device_info.recv_sections = nvdev->recv_section_cnt; 1002
970 device_info.recv_section_size = nvdev->recv_section_size; 1003 device_info->num_chn = count;
971 1004
972 ret = netvsc_detach(net, nvdev); 1005 ret = netvsc_detach(net, nvdev);
973 if (ret) 1006 if (ret)
974 return ret; 1007 goto out;
975 1008
976 ret = netvsc_attach(net, &device_info); 1009 ret = netvsc_attach(net, device_info);
977 if (ret) { 1010 if (ret) {
978 device_info.num_chn = orig; 1011 device_info->num_chn = orig;
979 if (netvsc_attach(net, &device_info)) 1012 if (netvsc_attach(net, device_info))
980 netdev_err(net, "restoring channel setting failed\n"); 1013 netdev_err(net, "restoring channel setting failed\n");
981 } 1014 }
982 1015
1016out:
1017 kfree(device_info);
983 return ret; 1018 return ret;
984} 1019}
985 1020
@@ -1048,48 +1083,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
1048 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1083 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
1049 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1084 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1050 int orig_mtu = ndev->mtu; 1085 int orig_mtu = ndev->mtu;
1051 struct netvsc_device_info device_info; 1086 struct netvsc_device_info *device_info;
1052 int ret = 0; 1087 int ret = 0;
1053 1088
1054 if (!nvdev || nvdev->destroy) 1089 if (!nvdev || nvdev->destroy)
1055 return -ENODEV; 1090 return -ENODEV;
1056 1091
1092 device_info = netvsc_devinfo_get(nvdev);
1093
1094 if (!device_info)
1095 return -ENOMEM;
1096
1057 /* Change MTU of underlying VF netdev first. */ 1097 /* Change MTU of underlying VF netdev first. */
1058 if (vf_netdev) { 1098 if (vf_netdev) {
1059 ret = dev_set_mtu(vf_netdev, mtu); 1099 ret = dev_set_mtu(vf_netdev, mtu);
1060 if (ret) 1100 if (ret)
1061 return ret; 1101 goto out;
1062 } 1102 }
1063 1103
1064 memset(&device_info, 0, sizeof(device_info));
1065 device_info.num_chn = nvdev->num_chn;
1066 device_info.send_sections = nvdev->send_section_cnt;
1067 device_info.send_section_size = nvdev->send_section_size;
1068 device_info.recv_sections = nvdev->recv_section_cnt;
1069 device_info.recv_section_size = nvdev->recv_section_size;
1070
1071 ret = netvsc_detach(ndev, nvdev); 1104 ret = netvsc_detach(ndev, nvdev);
1072 if (ret) 1105 if (ret)
1073 goto rollback_vf; 1106 goto rollback_vf;
1074 1107
1075 ndev->mtu = mtu; 1108 ndev->mtu = mtu;
1076 1109
1077 ret = netvsc_attach(ndev, &device_info); 1110 ret = netvsc_attach(ndev, device_info);
1078 if (ret) 1111 if (!ret)
1079 goto rollback; 1112 goto out;
1080
1081 return 0;
1082 1113
1083rollback:
1084 /* Attempt rollback to original MTU */ 1114 /* Attempt rollback to original MTU */
1085 ndev->mtu = orig_mtu; 1115 ndev->mtu = orig_mtu;
1086 1116
1087 if (netvsc_attach(ndev, &device_info)) 1117 if (netvsc_attach(ndev, device_info))
1088 netdev_err(ndev, "restoring mtu failed\n"); 1118 netdev_err(ndev, "restoring mtu failed\n");
1089rollback_vf: 1119rollback_vf:
1090 if (vf_netdev) 1120 if (vf_netdev)
1091 dev_set_mtu(vf_netdev, orig_mtu); 1121 dev_set_mtu(vf_netdev, orig_mtu);
1092 1122
1123out:
1124 kfree(device_info);
1093 return ret; 1125 return ret;
1094} 1126}
1095 1127
@@ -1674,7 +1706,7 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1674{ 1706{
1675 struct net_device_context *ndevctx = netdev_priv(ndev); 1707 struct net_device_context *ndevctx = netdev_priv(ndev);
1676 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1708 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1677 struct netvsc_device_info device_info; 1709 struct netvsc_device_info *device_info;
1678 struct ethtool_ringparam orig; 1710 struct ethtool_ringparam orig;
1679 u32 new_tx, new_rx; 1711 u32 new_tx, new_rx;
1680 int ret = 0; 1712 int ret = 0;
@@ -1694,26 +1726,29 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1694 new_rx == orig.rx_pending) 1726 new_rx == orig.rx_pending)
1695 return 0; /* no change */ 1727 return 0; /* no change */
1696 1728
1697 memset(&device_info, 0, sizeof(device_info)); 1729 device_info = netvsc_devinfo_get(nvdev);
1698 device_info.num_chn = nvdev->num_chn; 1730
1699 device_info.send_sections = new_tx; 1731 if (!device_info)
1700 device_info.send_section_size = nvdev->send_section_size; 1732 return -ENOMEM;
1701 device_info.recv_sections = new_rx; 1733
1702 device_info.recv_section_size = nvdev->recv_section_size; 1734 device_info->send_sections = new_tx;
1735 device_info->recv_sections = new_rx;
1703 1736
1704 ret = netvsc_detach(ndev, nvdev); 1737 ret = netvsc_detach(ndev, nvdev);
1705 if (ret) 1738 if (ret)
1706 return ret; 1739 goto out;
1707 1740
1708 ret = netvsc_attach(ndev, &device_info); 1741 ret = netvsc_attach(ndev, device_info);
1709 if (ret) { 1742 if (ret) {
1710 device_info.send_sections = orig.tx_pending; 1743 device_info->send_sections = orig.tx_pending;
1711 device_info.recv_sections = orig.rx_pending; 1744 device_info->recv_sections = orig.rx_pending;
1712 1745
1713 if (netvsc_attach(ndev, &device_info)) 1746 if (netvsc_attach(ndev, device_info))
1714 netdev_err(ndev, "restoring ringparam failed"); 1747 netdev_err(ndev, "restoring ringparam failed");
1715 } 1748 }
1716 1749
1750out:
1751 kfree(device_info);
1717 return ret; 1752 return ret;
1718} 1753}
1719 1754
@@ -2088,7 +2123,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
2088 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) 2123 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
2089 return NOTIFY_DONE; 2124 return NOTIFY_DONE;
2090 2125
2091 /* if syntihetic interface is a different namespace, 2126 /* if synthetic interface is a different namespace,
2092 * then move the VF to that namespace; join will be 2127 * then move the VF to that namespace; join will be
2093 * done again in that context. 2128 * done again in that context.
2094 */ 2129 */
@@ -2167,7 +2202,7 @@ static int netvsc_probe(struct hv_device *dev,
2167{ 2202{
2168 struct net_device *net = NULL; 2203 struct net_device *net = NULL;
2169 struct net_device_context *net_device_ctx; 2204 struct net_device_context *net_device_ctx;
2170 struct netvsc_device_info device_info; 2205 struct netvsc_device_info *device_info = NULL;
2171 struct netvsc_device *nvdev; 2206 struct netvsc_device *nvdev;
2172 int ret = -ENOMEM; 2207 int ret = -ENOMEM;
2173 2208
@@ -2214,21 +2249,21 @@ static int netvsc_probe(struct hv_device *dev,
2214 netif_set_real_num_rx_queues(net, 1); 2249 netif_set_real_num_rx_queues(net, 1);
2215 2250
2216 /* Notify the netvsc driver of the new device */ 2251 /* Notify the netvsc driver of the new device */
2217 memset(&device_info, 0, sizeof(device_info)); 2252 device_info = netvsc_devinfo_get(NULL);
2218 device_info.num_chn = VRSS_CHANNEL_DEFAULT; 2253
2219 device_info.send_sections = NETVSC_DEFAULT_TX; 2254 if (!device_info) {
2220 device_info.send_section_size = NETVSC_SEND_SECTION_SIZE; 2255 ret = -ENOMEM;
2221 device_info.recv_sections = NETVSC_DEFAULT_RX; 2256 goto devinfo_failed;
2222 device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; 2257 }
2223 2258
2224 nvdev = rndis_filter_device_add(dev, &device_info); 2259 nvdev = rndis_filter_device_add(dev, device_info);
2225 if (IS_ERR(nvdev)) { 2260 if (IS_ERR(nvdev)) {
2226 ret = PTR_ERR(nvdev); 2261 ret = PTR_ERR(nvdev);
2227 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 2262 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
2228 goto rndis_failed; 2263 goto rndis_failed;
2229 } 2264 }
2230 2265
2231 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 2266 memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
2232 2267
2233 /* We must get rtnl lock before scheduling nvdev->subchan_work, 2268 /* We must get rtnl lock before scheduling nvdev->subchan_work,
2234 * otherwise netvsc_subchan_work() can get rtnl lock first and wait 2269 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2236,7 +2271,7 @@ static int netvsc_probe(struct hv_device *dev,
2236 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() 2271 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2237 * -> ... -> device_add() -> ... -> __device_attach() can't get 2272 * -> ... -> device_add() -> ... -> __device_attach() can't get
2238 * the device lock, so all the subchannels can't be processed -- 2273 * the device lock, so all the subchannels can't be processed --
2239 * finally netvsc_subchan_work() hangs for ever. 2274 * finally netvsc_subchan_work() hangs forever.
2240 */ 2275 */
2241 rtnl_lock(); 2276 rtnl_lock();
2242 2277
@@ -2266,12 +2301,16 @@ static int netvsc_probe(struct hv_device *dev,
2266 2301
2267 list_add(&net_device_ctx->list, &netvsc_dev_list); 2302 list_add(&net_device_ctx->list, &netvsc_dev_list);
2268 rtnl_unlock(); 2303 rtnl_unlock();
2304
2305 kfree(device_info);
2269 return 0; 2306 return 0;
2270 2307
2271register_failed: 2308register_failed:
2272 rtnl_unlock(); 2309 rtnl_unlock();
2273 rndis_filter_device_remove(dev, nvdev); 2310 rndis_filter_device_remove(dev, nvdev);
2274rndis_failed: 2311rndis_failed:
2312 kfree(device_info);
2313devinfo_failed:
2275 free_percpu(net_device_ctx->vf_stats); 2314 free_percpu(net_device_ctx->vf_stats);
2276no_stats: 2315no_stats:
2277 hv_set_drvdata(dev, NULL); 2316 hv_set_drvdata(dev, NULL);
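
Taken together, the netvsc_drv.c changes move struct netvsc_device_info off the stack (it now embeds the 40-byte RSS key) and route every reconfigure path through netvsc_devinfo_get(), so a user-programmed RSS key survives the detach/attach cycle. The three ethtool paths above all reduce to the same snapshot / detach / attach / rollback shape, condensed here from the code in this diff:

static int example_set_channels(struct net_device *ndev,
				struct netvsc_device *nvdev, u32 count)
{
	struct netvsc_device_info *info = netvsc_devinfo_get(nvdev);
	u32 orig = nvdev->num_chn;	/* cache before detach */
	int ret;

	if (!info)
		return -ENOMEM;

	info->num_chn = count;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(ndev, info);
	if (ret) {
		info->num_chn = orig;	/* roll back to the old setting */
		if (netvsc_attach(ndev, info))
			netdev_err(ndev, "restoring channel setting failed\n");
	}
out:
	kfree(info);
	return ret;
}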
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8b537a049c1e..73b60592de06 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -774,8 +774,8 @@ cleanup:
774 return ret; 774 return ret;
775} 775}
776 776
777int rndis_filter_set_rss_param(struct rndis_device *rdev, 777static int rndis_set_rss_param_msg(struct rndis_device *rdev,
778 const u8 *rss_key) 778 const u8 *rss_key, u16 flag)
779{ 779{
780 struct net_device *ndev = rdev->ndev; 780 struct net_device *ndev = rdev->ndev;
781 struct rndis_request *request; 781 struct rndis_request *request;
@@ -804,7 +804,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
804 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; 804 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
805 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; 805 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
806 rssp->hdr.size = sizeof(struct ndis_recv_scale_param); 806 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
807 rssp->flag = 0; 807 rssp->flag = flag;
808 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | 808 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
809 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | 809 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
810 NDIS_HASH_TCP_IPV6; 810 NDIS_HASH_TCP_IPV6;
@@ -829,9 +829,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
829 829
830 wait_for_completion(&request->wait_event); 830 wait_for_completion(&request->wait_event);
831 set_complete = &request->response_msg.msg.set_complete; 831 set_complete = &request->response_msg.msg.set_complete;
832 if (set_complete->status == RNDIS_STATUS_SUCCESS) 832 if (set_complete->status == RNDIS_STATUS_SUCCESS) {
833 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); 833 if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
834 else { 834 !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
835 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
836
837 } else {
835 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", 838 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
836 set_complete->status); 839 set_complete->status);
837 ret = -EINVAL; 840 ret = -EINVAL;
@@ -842,6 +845,16 @@ cleanup:
842 return ret; 845 return ret;
843} 846}
844 847
848int rndis_filter_set_rss_param(struct rndis_device *rdev,
849 const u8 *rss_key)
850{
851 /* Disable RSS before change */
852 rndis_set_rss_param_msg(rdev, rss_key,
853 NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
854
855 return rndis_set_rss_param_msg(rdev, rss_key, 0);
856}
857
845static int rndis_filter_query_device_link_status(struct rndis_device *dev, 858static int rndis_filter_query_device_link_status(struct rndis_device *dev,
846 struct netvsc_device *net_device) 859 struct netvsc_device *net_device)
847{ 860{
@@ -1121,7 +1134,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
1121 * This breaks overlap of processing the host message for the 1134 * This breaks overlap of processing the host message for the
1122 * new primary channel with the initialization of sub-channels. 1135 * new primary channel with the initialization of sub-channels.
1123 */ 1136 */
1124int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) 1137int rndis_set_subchannel(struct net_device *ndev,
1138 struct netvsc_device *nvdev,
1139 struct netvsc_device_info *dev_info)
1125{ 1140{
1126 struct nvsp_message *init_packet = &nvdev->channel_init_pkt; 1141 struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1127 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1142 struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1161,8 +1176,11 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
1161 wait_event(nvdev->subchan_open, 1176 wait_event(nvdev->subchan_open,
1162 atomic_read(&nvdev->open_chn) == nvdev->num_chn); 1177 atomic_read(&nvdev->open_chn) == nvdev->num_chn);
1163 1178
1164 /* ignore failues from setting rss parameters, still have channels */ 1179 /* ignore failures from setting rss parameters, still have channels */
1165 rndis_filter_set_rss_param(rdev, netvsc_hash_key); 1180 if (dev_info)
1181 rndis_filter_set_rss_param(rdev, dev_info->rss_key);
1182 else
1183 rndis_filter_set_rss_param(rdev, netvsc_hash_key);
1166 1184
1167 netif_set_real_num_tx_queues(ndev, nvdev->num_chn); 1185 netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
1168 netif_set_real_num_rx_queues(ndev, nvdev->num_chn); 1186 netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
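
A note on the rss_key caching rule the rndis_filter.c hunk introduces: the cached key is refreshed only when the set-message neither disables RSS nor marks the key unchanged, so the disable step of the new two-phase update cannot clobber the cached key. A minimal runnable userspace sketch of that rule (the flag values below are illustrative stand-ins, not the real NDIS constants):

    #include <stdio.h>

    #define FLAG_DISABLE_RSS        0x1  /* stand-in for NDIS_RSS_PARAM_FLAG_DISABLE_RSS */
    #define FLAG_HASH_KEY_UNCHANGED 0x2  /* stand-in for NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED */

    /* Mirror of the condition guarding the memcpy() into rdev->rss_key. */
    static int should_cache_key(unsigned int flag)
    {
            return !(flag & FLAG_DISABLE_RSS) && !(flag & FLAG_HASH_KEY_UNCHANGED);
    }

    int main(void)
    {
            printf("flag=0 (normal set) -> cache key: %d\n", should_cache_key(0));
            printf("flag=DISABLE_RSS    -> cache key: %d\n", should_cache_key(FLAG_DISABLE_RSS));
            printf("flag=KEY_UNCHANGED  -> cache key: %d\n", should_cache_key(FLAG_HASH_KEY_UNCHANGED));
            return 0;
    }

Only the first case prints 1: the disable message and the key-unchanged message both leave the cache alone, which is what makes the disable-then-reprogram sequence in rndis_filter_set_rss_param() safe.
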
diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c
index 8ebe7f5484ae..f14ba5366b91 100644
--- a/drivers/net/phy/asix.c
+++ b/drivers/net/phy/asix.c
@@ -1,13 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/* Driver for Asix PHYs 2/* Driver for Asix PHYs
3 * 3 *
4 * Author: Michael Schmitz <schmitzmic@gmail.com> 4 * Author: Michael Schmitz <schmitzmic@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */ 5 */
12#include <linux/kernel.h> 6#include <linux/kernel.h>
13#include <linux/errno.h> 7#include <linux/errno.h>
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c
index b03fedd6c1d8..287f3ccf1da1 100644
--- a/drivers/net/phy/mdio-hisi-femac.c
+++ b/drivers/net/phy/mdio-hisi-femac.c
@@ -1,20 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Hisilicon Fast Ethernet MDIO Bus Driver 3 * Hisilicon Fast Ethernet MDIO Bus Driver
3 * 4 *
4 * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. 5 * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 6 */
19 7
20#include <linux/clk.h> 8#include <linux/clk.h>
@@ -163,4 +151,4 @@ module_platform_driver(hisi_femac_mdio_driver);
163 151
164MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver"); 152MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver");
165MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>"); 153MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
166MODULE_LICENSE("GPL v2"); 154MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index f1da70b9b55f..95abf7072f32 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/** 2/**
2 * drivers/net/phy/rockchip.c 3 * drivers/net/phy/rockchip.c
3 * 4 *
@@ -6,12 +7,6 @@
6 * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd 7 * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
7 * 8 *
8 * David Wu <david.wu@rock-chips.com> 9 * David Wu <david.wu@rock-chips.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 */ 10 */
16 11
17#include <linux/ethtool.h> 12#include <linux/ethtool.h>
@@ -229,4 +224,4 @@ MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl);
229 224
230MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>"); 225MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>");
231MODULE_DESCRIPTION("Rockchip Ethernet PHY driver"); 226MODULE_DESCRIPTION("Rockchip Ethernet PHY driver");
232MODULE_LICENSE("GPL v2"); 227MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b654f05b2ccd..3d93993e74da 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
739 asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); 739 asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
740 chipcode &= AX_CHIPCODE_MASK; 740 chipcode &= AX_CHIPCODE_MASK;
741 741
742 (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : 742 ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
743 ax88772a_hw_reset(dev, 0); 743 ax88772a_hw_reset(dev, 0);
744
745 if (ret < 0) {
746 netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
747 return ret;
748 }
744 749
745 /* Read PHYID register *AFTER* the PHY was reset properly */ 750 /* Read PHYID register *AFTER* the PHY was reset properly */
746 phyid = asix_get_phyid(dev); 751 phyid = asix_get_phyid(dev);
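
The ax88772_bind() fix works because a C conditional expression yields a value like any other expression; the bug was using the ternary purely to select which reset helper to call and discarding the result. A small self-contained illustration (the two reset functions are stand-ins, not the driver's):

    #include <stdio.h>

    static int reset_a(void) { return 0; }   /* stand-in for ax88772_hw_reset() */
    static int reset_b(void) { return -5; }  /* stand-in for ax88772a_hw_reset() */

    int main(void)
    {
            int is_a = 0;
            /* The ternary selects one call and propagates its return value. */
            int ret = is_a ? reset_a() : reset_b();

            if (ret < 0) {
                    printf("reset failed: %d\n", ret);
                    return 1;
            }
            return 0;
    }
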
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3a4b8786f7ea..320edcac4699 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2761,6 +2761,11 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2761 BIT(NL80211_CHAN_WIDTH_160); 2761 BIT(NL80211_CHAN_WIDTH_160);
2762 } 2762 }
2763 2763
2764 if (!n_limits) {
2765 err = -EINVAL;
2766 goto failed_hw;
2767 }
2768
2764 data->if_combination.n_limits = n_limits; 2769 data->if_combination.n_limits = n_limits;
2765 data->if_combination.max_interfaces = 2048; 2770 data->if_combination.max_interfaces = 2048;
2766 data->if_combination.limits = data->if_limits; 2771 data->if_combination.limits = data->if_limits;
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 64b218699656..3a93e4d9828b 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -530,8 +530,10 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
530 SET_NETDEV_DEV(dev, &priv->lowerdev->dev); 530 SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
531 dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL); 531 dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
532 532
533 if (!dev->ieee80211_ptr) 533 if (!dev->ieee80211_ptr) {
534 err = -ENOMEM;
534 goto remove_handler; 535 goto remove_handler;
536 }
535 537
536 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; 538 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
537 dev->ieee80211_ptr->wiphy = common_wiphy; 539 dev->ieee80211_ptr->wiphy = common_wiphy;
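
The virt_wifi change is the standard goto-unwind rule: assign a meaningful error code before every jump, or the function returns whatever err last held (here, success) on an allocation failure. A self-contained model of the idiom, using -12 as a stand-in for -ENOMEM:

    #include <stdio.h>
    #include <stdlib.h>

    static int setup_two(void)
    {
            int err = 0;
            void *a, *b;

            a = malloc(16);
            if (!a) {
                    err = -12;              /* set before jumping */
                    goto out;
            }
            b = malloc(16);
            if (!b) {
                    err = -12;              /* every error path sets its own code */
                    goto free_a;
            }
            /* ... use a and b ...; freed below only to keep the demo leak-free */
            free(b);
    free_a:
            free(a);
    out:
            return err;
    }

    int main(void)
    {
            printf("setup_two() = %d\n", setup_two());
            return 0;
    }
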
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 0cf58cabc9ed..3cf50274fadb 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -26,6 +26,12 @@ static int nvdimm_probe(struct device *dev)
26 struct nvdimm_drvdata *ndd; 26 struct nvdimm_drvdata *ndd;
27 int rc; 27 int rc;
28 28
29 rc = nvdimm_security_setup_events(dev);
30 if (rc < 0) {
31 dev_err(dev, "security event setup failed: %d\n", rc);
32 return rc;
33 }
34
29 rc = nvdimm_check_config_data(dev); 35 rc = nvdimm_check_config_data(dev);
30 if (rc) { 36 if (rc) {
31 /* not required for non-aliased nvdimm, ex. NVDIMM-N */ 37 /* not required for non-aliased nvdimm, ex. NVDIMM-N */
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 4890310df874..efe412a6b5b9 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -578,13 +578,25 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
578} 578}
579EXPORT_SYMBOL_GPL(__nvdimm_create); 579EXPORT_SYMBOL_GPL(__nvdimm_create);
580 580
581int nvdimm_security_setup_events(struct nvdimm *nvdimm) 581static void shutdown_security_notify(void *data)
582{ 582{
583 nvdimm->sec.overwrite_state = sysfs_get_dirent(nvdimm->dev.kobj.sd, 583 struct nvdimm *nvdimm = data;
584 "security"); 584
585 sysfs_put(nvdimm->sec.overwrite_state);
586}
587
588int nvdimm_security_setup_events(struct device *dev)
589{
590 struct nvdimm *nvdimm = to_nvdimm(dev);
591
592 if (nvdimm->sec.state < 0 || !nvdimm->sec.ops
593 || !nvdimm->sec.ops->overwrite)
594 return 0;
595 nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
585 if (!nvdimm->sec.overwrite_state) 596 if (!nvdimm->sec.overwrite_state)
586 return -ENODEV; 597 return -ENOMEM;
587 return 0; 598
599 return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
588} 600}
589EXPORT_SYMBOL_GPL(nvdimm_security_setup_events); 601EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);
590 602
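
The nvdimm rework leans on devm_add_action_or_reset(): register a cleanup that runs automatically at device unbind, and if the registration itself fails, run the cleanup immediately and return the error, so callers never leak on the failure path. A runnable userspace model of those semantics (all names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    typedef void (*action_fn)(void *);

    struct action { action_fn fn; void *data; };
    static struct action pending[4];
    static int npending;

    /* Model of devm_add_action_or_reset(): on registration failure the
     * action fires right away instead of being deferred. */
    static int add_action_or_reset(action_fn fn, void *data)
    {
            if (npending == 4) {
                    fn(data);
                    return -12;     /* stand-in for -ENOMEM */
            }
            pending[npending++] = (struct action){ fn, data };
            return 0;
    }

    /* Model of device unbind: run registered actions in reverse order. */
    static void unbind(void)
    {
            while (npending) {
                    struct action a = pending[--npending];
                    a.fn(a.data);
            }
    }

    static void release(void *data)
    {
            printf("releasing %s\n", (char *)data);
            free(data);
    }

    int main(void)
    {
            char *res = malloc(8);

            if (!res)
                    return 1;
            snprintf(res, 8, "sysfs");
            if (add_action_or_reset(release, res))
                    return 1;       /* res already released for us */
            unbind();
            return 0;
    }

This is why the hunk can drop any explicit sysfs_put() from the nvdimm error and teardown paths: shutdown_security_notify() is guaranteed to run exactly once either way.
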
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index cfde992684e7..379bf4305e61 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -250,6 +250,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
250void nvdimm_set_aliasing(struct device *dev); 250void nvdimm_set_aliasing(struct device *dev);
251void nvdimm_set_locked(struct device *dev); 251void nvdimm_set_locked(struct device *dev);
252void nvdimm_clear_locked(struct device *dev); 252void nvdimm_clear_locked(struct device *dev);
253int nvdimm_security_setup_events(struct device *dev);
253#if IS_ENABLED(CONFIG_NVDIMM_KEYS) 254#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
254int nvdimm_security_unlock(struct device *dev); 255int nvdimm_security_unlock(struct device *dev);
255#else 256#else
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index df4b3a6db51b..b9fff3b8ed1b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -545,8 +545,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
545 timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); 545 timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
546 ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + 546 ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
547 ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc); 547 ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
548 if (!(ctrl->anacap & (1 << 6))) 548 ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
549 ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
550 549
551 if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) { 550 if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
552 dev_err(ctrl->device, 551 dev_err(ctrl->device,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9bc585415d9b..a0fdd5fb4e7a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2041,53 +2041,52 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
2041 return ret; 2041 return ret;
2042} 2042}
2043 2043
2044/* irq_queues covers admin queue */ 2044/*
2045static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) 2045 * nrirqs is the number of interrupts available for write and read
2046 * queues. The core already reserved an interrupt for the admin queue.
2047 */
2048static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
2046{ 2049{
2047 unsigned int this_w_queues = write_queues; 2050 struct nvme_dev *dev = affd->priv;
2048 2051 unsigned int nr_read_queues;
2049 WARN_ON(!irq_queues);
2050
2051 /*
2052 * Setup read/write queue split, assign admin queue one independent
2053 * irq vector if irq_queues is > 1.
2054 */
2055 if (irq_queues <= 2) {
2056 dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2057 dev->io_queues[HCTX_TYPE_READ] = 0;
2058 return;
2059 }
2060
2061 /*
2062 * If 'write_queues' is set, ensure it leaves room for at least
2063 * one read queue and one admin queue
2064 */
2065 if (this_w_queues >= irq_queues)
2066 this_w_queues = irq_queues - 2;
2067 2052
2068 /* 2053 /*
2069 * If 'write_queues' is set to zero, reads and writes will share 2054 * If there is no interrupt available for queues, ensure that
2070 * a queue set. 2055 * the default queue is set to 1. The affinity set size is
2056 * also set to one, but the irq core ignores it for this case.
2057 *
2058 * If only one interrupt is available or 'write_queues' == 0, combine
2059 * write and read queues.
2060 *
2061 * If 'write_queues' > 0, ensure it leaves room for at least one read
2062 * queue.
2071 */ 2063 */
2072 if (!this_w_queues) { 2064 if (!nrirqs) {
2073 dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1; 2065 nrirqs = 1;
2074 dev->io_queues[HCTX_TYPE_READ] = 0; 2066 nr_read_queues = 0;
2067 } else if (nrirqs == 1 || !write_queues) {
2068 nr_read_queues = 0;
2069 } else if (write_queues >= nrirqs) {
2070 nr_read_queues = 1;
2075 } else { 2071 } else {
2076 dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; 2072 nr_read_queues = nrirqs - write_queues;
2077 dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
2078 } 2073 }
2074
2075 dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2076 affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
2077 dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
2078 affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
2079 affd->nr_sets = nr_read_queues ? 2 : 1;
2079} 2080}
2080 2081
2081static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 2082static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2082{ 2083{
2083 struct pci_dev *pdev = to_pci_dev(dev->dev); 2084 struct pci_dev *pdev = to_pci_dev(dev->dev);
2084 int irq_sets[2];
2085 struct irq_affinity affd = { 2085 struct irq_affinity affd = {
2086 .pre_vectors = 1, 2086 .pre_vectors = 1,
2087 .nr_sets = ARRAY_SIZE(irq_sets), 2087 .calc_sets = nvme_calc_irq_sets,
2088 .sets = irq_sets, 2088 .priv = dev,
2089 }; 2089 };
2090 int result = 0;
2091 unsigned int irq_queues, this_p_queues; 2090 unsigned int irq_queues, this_p_queues;
2092 2091
2093 /* 2092 /*
@@ -2103,51 +2102,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2103 } 2102 }
2104 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; 2103 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
2105 2104
2106 /* 2105 /* Initialize for the single interrupt case */
2107 * For irq sets, we have to ask for minvec == maxvec. This passes 2106 dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2108 * any reduction back to us, so we can adjust our queue counts and 2107 dev->io_queues[HCTX_TYPE_READ] = 0;
2109 * IRQ vector needs.
2110 */
2111 do {
2112 nvme_calc_io_queues(dev, irq_queues);
2113 irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT];
2114 irq_sets[1] = dev->io_queues[HCTX_TYPE_READ];
2115 if (!irq_sets[1])
2116 affd.nr_sets = 1;
2117
2118 /*
2119 * If we got a failure and we're down to asking for just
2120 * 1 + 1 queues, just ask for a single vector. We'll share
2121 * that between the single IO queue and the admin queue.
2122 * Otherwise, we assign one independent vector to admin queue.
2123 */
2124 if (irq_queues > 1)
2125 irq_queues = irq_sets[0] + irq_sets[1] + 1;
2126 2108
2127 result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, 2109 return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
2128 irq_queues, 2110 PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
2129 PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
2130
2131 /*
2132 * Need to reduce our vec counts. If we get ENOSPC, the
2133 * platform should support mulitple vecs, we just need
2134 * to decrease our ask. If we get EINVAL, the platform
2135 * likely does not. Back down to ask for just one vector.
2136 */
2137 if (result == -ENOSPC) {
2138 irq_queues--;
2139 if (!irq_queues)
2140 return result;
2141 continue;
2142 } else if (result == -EINVAL) {
2143 irq_queues = 1;
2144 continue;
2145 } else if (result <= 0)
2146 return -EIO;
2147 break;
2148 } while (1);
2149
2150 return result;
2151} 2111}
2152 2112
2153static void nvme_disable_io_queues(struct nvme_dev *dev) 2113static void nvme_disable_io_queues(struct nvme_dev *dev)
@@ -3020,6 +2980,7 @@ static struct pci_driver nvme_driver = {
3020 2980
3021static int __init nvme_init(void) 2981static int __init nvme_init(void)
3022{ 2982{
2983 BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
3023 return pci_register_driver(&nvme_driver); 2984 return pci_register_driver(&nvme_driver);
3024} 2985}
3025 2986
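
The new nvme_calc_irq_sets() is pure arithmetic over however many vectors the IRQ core can actually grant, which is what lets the driver delete its retry loop: the core invokes the calc_sets callback with the granted nrirqs and the driver simply splits it. A runnable userspace transcription of that split (write_queues stands in for the module parameter):

    #include <stdio.h>

    static void calc(unsigned int nrirqs, unsigned int write_queues,
                     unsigned int *def, unsigned int *read)
    {
            unsigned int nr_read_queues;

            if (!nrirqs) {                  /* no vectors: single shared queue */
                    nrirqs = 1;
                    nr_read_queues = 0;
            } else if (nrirqs == 1 || !write_queues) {
                    nr_read_queues = 0;     /* combine reads and writes */
            } else if (write_queues >= nrirqs) {
                    nr_read_queues = 1;     /* leave room for one read queue */
            } else {
                    nr_read_queues = nrirqs - write_queues;
            }
            *def = nrirqs - nr_read_queues;
            *read = nr_read_queues;
    }

    int main(void)
    {
            unsigned int d, r;

            calc(8, 4, &d, &r);
            printf("nrirqs=8 write_queues=4 -> default=%u read=%u\n", d, r);
            calc(1, 4, &d, &r);
            printf("nrirqs=1 write_queues=4 -> default=%u read=%u\n", d, r);
            return 0;
    }

This prints default=4 read=4 for the first case, and default=1 read=0 for a single vector, matching the "Initialize for the single interrupt case" defaults in nvme_setup_irqs(); affd->nr_sets then becomes 2 only when a read set actually exists.
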
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0a2fd2949ad7..52abc3a6de12 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -119,6 +119,7 @@ struct nvme_rdma_ctrl {
119 119
120 struct nvme_ctrl ctrl; 120 struct nvme_ctrl ctrl;
121 bool use_inline_data; 121 bool use_inline_data;
122 u32 io_queues[HCTX_MAX_TYPES];
122}; 123};
123 124
124static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) 125static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -165,8 +166,8 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
165static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue) 166static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
166{ 167{
167 return nvme_rdma_queue_idx(queue) > 168 return nvme_rdma_queue_idx(queue) >
168 queue->ctrl->ctrl.opts->nr_io_queues + 169 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
169 queue->ctrl->ctrl.opts->nr_write_queues; 170 queue->ctrl->io_queues[HCTX_TYPE_READ];
170} 171}
171 172
172static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue) 173static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
@@ -661,8 +662,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
661 nr_io_queues = min_t(unsigned int, nr_io_queues, 662 nr_io_queues = min_t(unsigned int, nr_io_queues,
662 ibdev->num_comp_vectors); 663 ibdev->num_comp_vectors);
663 664
664 nr_io_queues += min(opts->nr_write_queues, num_online_cpus()); 665 if (opts->nr_write_queues) {
665 nr_io_queues += min(opts->nr_poll_queues, num_online_cpus()); 666 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
667 min(opts->nr_write_queues, nr_io_queues);
668 nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
669 } else {
670 ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
671 }
672
673 ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
674
675 if (opts->nr_poll_queues) {
676 ctrl->io_queues[HCTX_TYPE_POLL] =
677 min(opts->nr_poll_queues, num_online_cpus());
678 nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
679 }
666 680
667 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 681 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
668 if (ret) 682 if (ret)
@@ -1689,18 +1703,28 @@ static enum blk_eh_timer_return
1689nvme_rdma_timeout(struct request *rq, bool reserved) 1703nvme_rdma_timeout(struct request *rq, bool reserved)
1690{ 1704{
1691 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 1705 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1706 struct nvme_rdma_queue *queue = req->queue;
1707 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1692 1708
1693 dev_warn(req->queue->ctrl->ctrl.device, 1709 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
1694 "I/O %d QID %d timeout, reset controller\n", 1710 rq->tag, nvme_rdma_queue_idx(queue));
1695 rq->tag, nvme_rdma_queue_idx(req->queue));
1696 1711
1697 /* queue error recovery */ 1712 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
1698 nvme_rdma_error_recovery(req->queue->ctrl); 1713 /*
1714 * Teardown immediately if controller times out while starting
1715 * or we are already started error recovery. all outstanding
1716 * requests are completed on shutdown, so we return BLK_EH_DONE.
1717 */
1718 flush_work(&ctrl->err_work);
1719 nvme_rdma_teardown_io_queues(ctrl, false);
1720 nvme_rdma_teardown_admin_queue(ctrl, false);
1721 return BLK_EH_DONE;
1722 }
1699 1723
1700 /* fail with DNR on cmd timeout */ 1724 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1701 nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR; 1725 nvme_rdma_error_recovery(ctrl);
1702 1726
1703 return BLK_EH_DONE; 1727 return BLK_EH_RESET_TIMER;
1704} 1728}
1705 1729
1706static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1730static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1779,17 +1803,15 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
1779 struct nvme_rdma_ctrl *ctrl = set->driver_data; 1803 struct nvme_rdma_ctrl *ctrl = set->driver_data;
1780 1804
1781 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; 1805 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
1782 set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues; 1806 set->map[HCTX_TYPE_DEFAULT].nr_queues =
1807 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1808 set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
1783 if (ctrl->ctrl.opts->nr_write_queues) { 1809 if (ctrl->ctrl.opts->nr_write_queues) {
1784 /* separate read/write queues */ 1810 /* separate read/write queues */
1785 set->map[HCTX_TYPE_DEFAULT].nr_queues =
1786 ctrl->ctrl.opts->nr_write_queues;
1787 set->map[HCTX_TYPE_READ].queue_offset = 1811 set->map[HCTX_TYPE_READ].queue_offset =
1788 ctrl->ctrl.opts->nr_write_queues; 1812 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1789 } else { 1813 } else {
1790 /* mixed read/write queues */ 1814 /* mixed read/write queues */
1791 set->map[HCTX_TYPE_DEFAULT].nr_queues =
1792 ctrl->ctrl.opts->nr_io_queues;
1793 set->map[HCTX_TYPE_READ].queue_offset = 0; 1815 set->map[HCTX_TYPE_READ].queue_offset = 0;
1794 } 1816 }
1795 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], 1817 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1799,12 +1821,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
1799 1821
1800 if (ctrl->ctrl.opts->nr_poll_queues) { 1822 if (ctrl->ctrl.opts->nr_poll_queues) {
1801 set->map[HCTX_TYPE_POLL].nr_queues = 1823 set->map[HCTX_TYPE_POLL].nr_queues =
1802 ctrl->ctrl.opts->nr_poll_queues; 1824 ctrl->io_queues[HCTX_TYPE_POLL];
1803 set->map[HCTX_TYPE_POLL].queue_offset = 1825 set->map[HCTX_TYPE_POLL].queue_offset =
1804 ctrl->ctrl.opts->nr_io_queues; 1826 ctrl->io_queues[HCTX_TYPE_DEFAULT];
1805 if (ctrl->ctrl.opts->nr_write_queues) 1827 if (ctrl->ctrl.opts->nr_write_queues)
1806 set->map[HCTX_TYPE_POLL].queue_offset += 1828 set->map[HCTX_TYPE_POLL].queue_offset +=
1807 ctrl->ctrl.opts->nr_write_queues; 1829 ctrl->io_queues[HCTX_TYPE_READ];
1808 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); 1830 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
1809 } 1831 }
1810 return 0; 1832 return 0;
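
The io_queues[] bookkeeping added to nvme-rdma exists so the tag-set map can lay the queue types out contiguously: default queues first, then read queues, then poll queues, each type's offset being the sum of the counts before it. A tiny runnable sketch of that layout (the counts are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int def = 4, read = 2, poll = 2;
            unsigned int read_off = def;
            unsigned int poll_off = def + read;

            printf("default: [0..%u)\n", def);
            printf("read:    [%u..%u)\n", read_off, read_off + read);
            printf("poll:    [%u..%u)\n", poll_off, poll_off + poll);
            return 0;
    }

Using the granted counts rather than the requested opts->nr_* values (which nvme_set_queue_count() may have reduced) is the actual bug fix here, including the nvme_rdma_poll_queue() index test.
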
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 265a0543b381..5f0a00425242 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1948,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
1948 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; 1948 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
1949 struct nvme_tcp_cmd_pdu *pdu = req->pdu; 1949 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1950 1950
1951 dev_dbg(ctrl->ctrl.device, 1951 dev_warn(ctrl->ctrl.device,
1952 "queue %d: timeout request %#x type %d\n", 1952 "queue %d: timeout request %#x type %d\n",
1953 nvme_tcp_queue_id(req->queue), rq->tag, 1953 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
1954 pdu->hdr.type);
1955 1954
1956 if (ctrl->ctrl.state != NVME_CTRL_LIVE) { 1955 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
1957 union nvme_result res = {}; 1956 /*
1958 1957 * Teardown immediately if controller times out while starting
1959 nvme_req(rq)->flags |= NVME_REQ_CANCELLED; 1958 * or if we have already started error recovery. All outstanding
1960 nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res); 1959 * requests are completed on shutdown, so we return BLK_EH_DONE.
1960 */
1961 flush_work(&ctrl->err_work);
1962 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
1963 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
1961 return BLK_EH_DONE; 1964 return BLK_EH_DONE;
1962 } 1965 }
1963 1966
1964 /* queue error recovery */ 1967 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1965 nvme_tcp_error_recovery(&ctrl->ctrl); 1968 nvme_tcp_error_recovery(&ctrl->ctrl);
1966 1969
1967 return BLK_EH_RESET_TIMER; 1970 return BLK_EH_RESET_TIMER;
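
Both the rdma and tcp timeout handlers now follow the same policy, sketched below as a runnable model: when the controller is not live, the teardown completes every outstanding request, so the handler reports the request as done; otherwise it kicks error recovery and re-arms the timer. The enum names are simplified stand-ins for BLK_EH_DONE/BLK_EH_RESET_TIMER and NVME_CTRL_LIVE:

    #include <stdio.h>

    enum eh    { EH_DONE, EH_RESET_TIMER };
    enum state { CTRL_LIVE, CTRL_CONNECTING };

    static enum eh timeout(enum state s)
    {
            if (s != CTRL_LIVE)
                    return EH_DONE;         /* teardown completed the request */
            return EH_RESET_TIMER;          /* error recovery will handle it */
    }

    int main(void)
    {
            printf("connecting -> %s\n",
                   timeout(CTRL_CONNECTING) == EH_DONE ? "EH_DONE" : "EH_RESET_TIMER");
            printf("live       -> %s\n",
                   timeout(CTRL_LIVE) == EH_DONE ? "EH_DONE" : "EH_RESET_TIMER");
            return 0;
    }
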
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index a8d23eb80192..a884e3a0e8af 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
139static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); 139static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
140static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); 140static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
141static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); 141static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
142static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
143 struct nvmet_rdma_rsp *r);
144static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
145 struct nvmet_rdma_rsp *r);
142 146
143static const struct nvmet_fabrics_ops nvmet_rdma_ops; 147static const struct nvmet_fabrics_ops nvmet_rdma_ops;
144 148
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
182 spin_unlock_irqrestore(&queue->rsps_lock, flags); 186 spin_unlock_irqrestore(&queue->rsps_lock, flags);
183 187
184 if (unlikely(!rsp)) { 188 if (unlikely(!rsp)) {
185 rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); 189 int ret;
190
191 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
186 if (unlikely(!rsp)) 192 if (unlikely(!rsp))
187 return NULL; 193 return NULL;
194 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
195 if (unlikely(ret)) {
196 kfree(rsp);
197 return NULL;
198 }
199
188 rsp->allocated = true; 200 rsp->allocated = true;
189 } 201 }
190 202
@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
197 unsigned long flags; 209 unsigned long flags;
198 210
199 if (unlikely(rsp->allocated)) { 211 if (unlikely(rsp->allocated)) {
212 nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
200 kfree(rsp); 213 kfree(rsp);
201 return; 214 return;
202 } 215 }
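
The nvmet hunk makes the emergency (out-of-pool) response path symmetric: an on-demand object must be fully initialized before it is handed out, torn down in full when it is returned, and the allocated flag selects that extra teardown. A compact runnable userspace model of the pattern (rsp_init/rsp_teardown stand in for nvmet_rdma_alloc_rsp/free_rsp):

    #include <stdlib.h>

    struct rsp {
            char *buf;      /* stand-in for the IB/DMA resources */
            int allocated;  /* came from the heap, not the pool */
    };

    static int rsp_init(struct rsp *r)
    {
            r->buf = malloc(32);
            return r->buf ? 0 : -12;        /* -12: stand-in for -ENOMEM */
    }

    static void rsp_teardown(struct rsp *r)
    {
            free(r->buf);
    }

    static struct rsp *get_rsp(void)
    {
            struct rsp *r = calloc(1, sizeof(*r));  /* zeroed, like kzalloc() */

            if (!r)
                    return NULL;
            if (rsp_init(r)) {              /* init failed: free the shell too */
                    free(r);
                    return NULL;
            }
            r->allocated = 1;
            return r;
    }

    static void put_rsp(struct rsp *r)
    {
            if (r->allocated) {             /* teardown mirrors get_rsp() */
                    rsp_teardown(r);
                    free(r);
            }
    }

    int main(void)
    {
            struct rsp *r = get_rsp();

            if (!r)
                    return 1;
            put_rsp(r);
            return 0;
    }
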
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 52e47dac028f..80f843030e36 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -310,6 +310,9 @@ static int imx6_pcie_attach_pd(struct device *dev)
310 imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); 310 imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
311 if (IS_ERR(imx6_pcie->pd_pcie)) 311 if (IS_ERR(imx6_pcie->pd_pcie))
312 return PTR_ERR(imx6_pcie->pd_pcie); 312 return PTR_ERR(imx6_pcie->pd_pcie);
313 /* Do nothing when the power domain is missing */
314 if (!imx6_pcie->pd_pcie)
315 return 0;
313 link = device_link_add(dev, imx6_pcie->pd_pcie, 316 link = device_link_add(dev, imx6_pcie->pd_pcie,
314 DL_FLAG_STATELESS | 317 DL_FLAG_STATELESS |
315 DL_FLAG_PM_RUNTIME | 318 DL_FLAG_PM_RUNTIME |
@@ -323,13 +326,13 @@ static int imx6_pcie_attach_pd(struct device *dev)
323 if (IS_ERR(imx6_pcie->pd_pcie_phy)) 326 if (IS_ERR(imx6_pcie->pd_pcie_phy))
324 return PTR_ERR(imx6_pcie->pd_pcie_phy); 327 return PTR_ERR(imx6_pcie->pd_pcie_phy);
325 328
326 device_link_add(dev, imx6_pcie->pd_pcie_phy, 329 link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
327 DL_FLAG_STATELESS | 330 DL_FLAG_STATELESS |
328 DL_FLAG_PM_RUNTIME | 331 DL_FLAG_PM_RUNTIME |
329 DL_FLAG_RPM_ACTIVE); 332 DL_FLAG_RPM_ACTIVE);
330 if (IS_ERR(link)) { 333 if (!link) {
331 dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link)); 334 dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
332 return PTR_ERR(link); 335 return -EINVAL;
333 } 336 }
334 337
335 return 0; 338 return 0;
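
device_link_add() reports failure with NULL, never with an ERR_PTR, which is exactly what the pci-imx6 hunk corrects: IS_ERR() on its result can never be true, so the old error check was dead code. A minimal kernel-style sketch of the corrected check (a hypothetical helper, not compilable outside a kernel tree):

    #include <linux/device.h>
    #include <linux/errno.h>

    static int link_to_pd(struct device *consumer, struct device *pd_dev)
    {
            struct device_link *link;

            link = device_link_add(consumer, pd_dev,
                                   DL_FLAG_STATELESS |
                                   DL_FLAG_PM_RUNTIME |
                                   DL_FLAG_RPM_ACTIVE);
            if (!link)              /* failure is NULL, never an ERR_PTR */
                    return -EINVAL;
            return 0;
    }
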
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index b171b6bc15c8..0c389a30ef5d 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -22,7 +22,6 @@
22#include <linux/resource.h> 22#include <linux/resource.h>
23#include <linux/of_pci.h> 23#include <linux/of_pci.h>
24#include <linux/of_irq.h> 24#include <linux/of_irq.h>
25#include <linux/gpio/consumer.h>
26 25
27#include "pcie-designware.h" 26#include "pcie-designware.h"
28 27
@@ -30,7 +29,6 @@ struct armada8k_pcie {
30 struct dw_pcie *pci; 29 struct dw_pcie *pci;
31 struct clk *clk; 30 struct clk *clk;
32 struct clk *clk_reg; 31 struct clk *clk_reg;
33 struct gpio_desc *reset_gpio;
34}; 32};
35 33
36#define PCIE_VENDOR_REGS_OFFSET 0x8000 34#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp)
139 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 137 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
140 struct armada8k_pcie *pcie = to_armada8k_pcie(pci); 138 struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
141 139
142 if (pcie->reset_gpio) {
143 /* assert and then deassert the reset signal */
144 gpiod_set_value_cansleep(pcie->reset_gpio, 1);
145 msleep(100);
146 gpiod_set_value_cansleep(pcie->reset_gpio, 0);
147 }
148 dw_pcie_setup_rc(pp); 140 dw_pcie_setup_rc(pp);
149 armada8k_pcie_establish_link(pcie); 141 armada8k_pcie_establish_link(pcie);
150 142
@@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
257 goto fail_clkreg; 249 goto fail_clkreg;
258 } 250 }
259 251
260 /* Get reset gpio signal and hold asserted (logically high) */
261 pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
262 GPIOD_OUT_HIGH);
263 if (IS_ERR(pcie->reset_gpio)) {
264 ret = PTR_ERR(pcie->reset_gpio);
265 goto fail_clkreg;
266 }
267
268 platform_set_drvdata(pdev, pcie); 252 platform_set_drvdata(pdev, pcie);
269 253
270 ret = armada8k_add_pcie_port(pcie, pdev); 254 ret = armada8k_add_pcie_port(pcie, pdev);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 4c0b47867258..73986825d221 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -532,7 +532,7 @@ error_attrs:
532} 532}
533 533
534static struct msi_desc * 534static struct msi_desc *
535msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) 535msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
536{ 536{
537 struct irq_affinity_desc *masks = NULL; 537 struct irq_affinity_desc *masks = NULL;
538 struct msi_desc *entry; 538 struct msi_desc *entry;
@@ -597,7 +597,7 @@ static int msi_verify_entries(struct pci_dev *dev)
597 * which could have been allocated. 597 * which could have been allocated.
598 */ 598 */
599static int msi_capability_init(struct pci_dev *dev, int nvec, 599static int msi_capability_init(struct pci_dev *dev, int nvec,
600 const struct irq_affinity *affd) 600 struct irq_affinity *affd)
601{ 601{
602 struct msi_desc *entry; 602 struct msi_desc *entry;
603 int ret; 603 int ret;
@@ -669,7 +669,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
669 669
670static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, 670static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
671 struct msix_entry *entries, int nvec, 671 struct msix_entry *entries, int nvec,
672 const struct irq_affinity *affd) 672 struct irq_affinity *affd)
673{ 673{
674 struct irq_affinity_desc *curmsk, *masks = NULL; 674 struct irq_affinity_desc *curmsk, *masks = NULL;
675 struct msi_desc *entry; 675 struct msi_desc *entry;
@@ -736,7 +736,7 @@ static void msix_program_entries(struct pci_dev *dev,
736 * requested MSI-X entries with allocated irqs or non-zero for otherwise. 736 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
737 **/ 737 **/
738static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, 738static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
739 int nvec, const struct irq_affinity *affd) 739 int nvec, struct irq_affinity *affd)
740{ 740{
741 int ret; 741 int ret;
742 u16 control; 742 u16 control;
@@ -932,7 +932,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
932EXPORT_SYMBOL(pci_msix_vec_count); 932EXPORT_SYMBOL(pci_msix_vec_count);
933 933
934static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, 934static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
935 int nvec, const struct irq_affinity *affd) 935 int nvec, struct irq_affinity *affd)
936{ 936{
937 int nr_entries; 937 int nr_entries;
938 int i, j; 938 int i, j;
@@ -1018,7 +1018,7 @@ int pci_msi_enabled(void)
1018EXPORT_SYMBOL(pci_msi_enabled); 1018EXPORT_SYMBOL(pci_msi_enabled);
1019 1019
1020static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, 1020static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1021 const struct irq_affinity *affd) 1021 struct irq_affinity *affd)
1022{ 1022{
1023 int nvec; 1023 int nvec;
1024 int rc; 1024 int rc;
@@ -1035,13 +1035,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1035 if (maxvec < minvec) 1035 if (maxvec < minvec)
1036 return -ERANGE; 1036 return -ERANGE;
1037 1037
1038 /*
1039 * If the caller is passing in sets, we can't support a range of
1040 * vectors. The caller needs to handle that.
1041 */
1042 if (affd && affd->nr_sets && minvec != maxvec)
1043 return -EINVAL;
1044
1045 if (WARN_ON_ONCE(dev->msi_enabled)) 1038 if (WARN_ON_ONCE(dev->msi_enabled))
1046 return -EINVAL; 1039 return -EINVAL;
1047 1040
@@ -1086,20 +1079,13 @@ EXPORT_SYMBOL(pci_enable_msi);
1086 1079
1087static int __pci_enable_msix_range(struct pci_dev *dev, 1080static int __pci_enable_msix_range(struct pci_dev *dev,
1088 struct msix_entry *entries, int minvec, 1081 struct msix_entry *entries, int minvec,
1089 int maxvec, const struct irq_affinity *affd) 1082 int maxvec, struct irq_affinity *affd)
1090{ 1083{
1091 int rc, nvec = maxvec; 1084 int rc, nvec = maxvec;
1092 1085
1093 if (maxvec < minvec) 1086 if (maxvec < minvec)
1094 return -ERANGE; 1087 return -ERANGE;
1095 1088
1096 /*
1097 * If the caller is passing in sets, we can't support a range of
1098 * supported vectors. The caller needs to handle that.
1099 */
1100 if (affd && affd->nr_sets && minvec != maxvec)
1101 return -EINVAL;
1102
1103 if (WARN_ON_ONCE(dev->msix_enabled)) 1089 if (WARN_ON_ONCE(dev->msix_enabled))
1104 return -EINVAL; 1090 return -EINVAL;
1105 1091
@@ -1165,9 +1151,9 @@ EXPORT_SYMBOL(pci_enable_msix_range);
1165 */ 1151 */
1166int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1152int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1167 unsigned int max_vecs, unsigned int flags, 1153 unsigned int max_vecs, unsigned int flags,
1168 const struct irq_affinity *affd) 1154 struct irq_affinity *affd)
1169{ 1155{
1170 static const struct irq_affinity msi_default_affd; 1156 struct irq_affinity msi_default_affd = {0};
1171 int msix_vecs = -ENOSPC; 1157 int msix_vecs = -ENOSPC;
1172 int msi_vecs = -ENOSPC; 1158 int msi_vecs = -ENOSPC;
1173 1159
@@ -1196,6 +1182,13 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1196 /* use legacy irq if allowed */ 1182 /* use legacy irq if allowed */
1197 if (flags & PCI_IRQ_LEGACY) { 1183 if (flags & PCI_IRQ_LEGACY) {
1198 if (min_vecs == 1 && dev->irq) { 1184 if (min_vecs == 1 && dev->irq) {
1185 /*
1186 * Invoke the affinity spreading logic to ensure that
1187 * the device driver can adjust queue configuration
1188 * for the single interrupt case.
1189 */
1190 if (affd)
1191 irq_create_affinity_masks(1, affd);
1199 pci_intx(dev, 1); 1192 pci_intx(dev, 1);
1200 return 1; 1193 return 1;
1201 } 1194 }
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c
index 6fd6e07ab345..09a77e556ece 100644
--- a/drivers/phy/qualcomm/phy-ath79-usb.c
+++ b/drivers/phy/qualcomm/phy-ath79-usb.c
@@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy)
31 31
32 err = reset_control_deassert(priv->reset); 32 err = reset_control_deassert(priv->reset);
33 if (err && priv->no_suspend_override) 33 if (err && priv->no_suspend_override)
34 reset_control_assert(priv->no_suspend_override); 34 reset_control_deassert(priv->no_suspend_override);
35 35
36 return err; 36 return err;
37} 37}
@@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev)
69 if (!priv) 69 if (!priv)
70 return -ENOMEM; 70 return -ENOMEM;
71 71
72 priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy"); 72 priv->reset = devm_reset_control_get(&pdev->dev, "phy");
73 if (IS_ERR(priv->reset)) 73 if (IS_ERR(priv->reset))
74 return PTR_ERR(priv->reset); 74 return PTR_ERR(priv->reset);
75 75
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 77fdaa551977..a52c5bb35033 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -204,11 +204,11 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev,
204 204
205 if (args->args_count < 1) 205 if (args->args_count < 1)
206 return ERR_PTR(-EINVAL); 206 return ERR_PTR(-EINVAL);
207 if (!priv || !priv->if_phys)
208 return ERR_PTR(-ENODEV);
207 if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) && 209 if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) &&
208 args->args_count < 2) 210 args->args_count < 2)
209 return ERR_PTR(-EINVAL); 211 return ERR_PTR(-EINVAL);
210 if (!priv || !priv->if_phys)
211 return ERR_PTR(-ENODEV);
212 if (phy_id > priv->soc_data->num_ports) 212 if (phy_id > priv->soc_data->num_ports)
213 return ERR_PTR(-EINVAL); 213 return ERR_PTR(-EINVAL);
214 if (phy_id != priv->if_phys[phy_id - 1].id) 214 if (phy_id != priv->if_phys[phy_id - 1].id)
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 05044e323ea5..03ec7a5d9d0b 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1513 .matches = { 1513 .matches = {
1514 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1514 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1515 DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), 1515 DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
1516 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1516 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1517 }, 1517 },
1518 }, 1518 },
1519 { 1519 {
@@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1521 .matches = { 1521 .matches = {
1522 DMI_MATCH(DMI_SYS_VENDOR, "HP"), 1522 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1523 DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), 1523 DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
1524 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1524 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1525 }, 1525 },
1526 }, 1526 },
1527 { 1527 {
@@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1529 .matches = { 1529 .matches = {
1530 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1530 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1531 DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), 1531 DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
1532 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1532 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1533 }, 1533 },
1534 }, 1534 },
1535 { 1535 {
@@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1537 .matches = { 1537 .matches = {
1538 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1538 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1539 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), 1539 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
1540 DMI_MATCH(DMI_BOARD_VERSION, "1.0"), 1540 DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1541 }, 1541 },
1542 }, 1542 },
1543 {} 1543 {}
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 1817786ab6aa..a005cbccb4f7 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -45,12 +45,14 @@ config PINCTRL_MT2701
45config PINCTRL_MT7623 45config PINCTRL_MT7623
46 bool "Mediatek MT7623 pin control with generic binding" 46 bool "Mediatek MT7623 pin control with generic binding"
47 depends on MACH_MT7623 || COMPILE_TEST 47 depends on MACH_MT7623 || COMPILE_TEST
48 depends on OF
48 default MACH_MT7623 49 default MACH_MT7623
49 select PINCTRL_MTK_MOORE 50 select PINCTRL_MTK_MOORE
50 51
51config PINCTRL_MT7629 52config PINCTRL_MT7629
52 bool "Mediatek MT7629 pin control" 53 bool "Mediatek MT7629 pin control"
53 depends on MACH_MT7629 || COMPILE_TEST 54 depends on MACH_MT7629 || COMPILE_TEST
55 depends on OF
54 default MACH_MT7629 56 default MACH_MT7629
55 select PINCTRL_MTK_MOORE 57 select PINCTRL_MTK_MOORE
56 58
@@ -92,6 +94,7 @@ config PINCTRL_MT6797
92 94
93config PINCTRL_MT7622 95config PINCTRL_MT7622
94 bool "MediaTek MT7622 pin control" 96 bool "MediaTek MT7622 pin control"
97 depends on OF
95 depends on ARM64 || COMPILE_TEST 98 depends on ARM64 || COMPILE_TEST
96 default ARM64 && ARCH_MEDIATEK 99 default ARM64 && ARCH_MEDIATEK
97 select PINCTRL_MTK_MOORE 100 select PINCTRL_MTK_MOORE
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index b03481ef99a1..98905d4a79ca 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
832 break; 832 break;
833 833
834 case MCP_TYPE_S18: 834 case MCP_TYPE_S18:
835 one_regmap_config =
836 devm_kmemdup(dev, &mcp23x17_regmap,
837 sizeof(struct regmap_config), GFP_KERNEL);
838 if (!one_regmap_config)
839 return -ENOMEM;
835 mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, 840 mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
836 &mcp23x17_regmap); 841 one_regmap_config);
837 mcp->reg_shift = 1; 842 mcp->reg_shift = 1;
838 mcp->chip.ngpio = 16; 843 mcp->chip.ngpio = 16;
839 mcp->chip.label = "mcp23s18"; 844 mcp->chip.label = "mcp23s18";
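
The mcp23s08 fix follows a general rule: never hand a shared, possibly-const regmap_config to devm_regmap_init() if the driver may tweak it per instance; duplicate it into device-managed memory first. A kernel-style sketch of the pattern, hedged as illustrative rather than tied to this driver (shared_template and init_private_regmap are made-up names):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regmap.h>
    #include <linux/slab.h>

    static const struct regmap_config shared_template = {
            .reg_bits = 8,
            .val_bits = 8,
    };

    static struct regmap *init_private_regmap(struct device *dev,
                                              const struct regmap_bus *bus,
                                              void *bus_ctx)
    {
            struct regmap_config *cfg;

            cfg = devm_kmemdup(dev, &shared_template, sizeof(*cfg), GFP_KERNEL);
            if (!cfg)
                    return ERR_PTR(-ENOMEM);
            /* Now safe to adjust per instance, e.g. cfg->max_register. */
            return devm_regmap_init(dev, bus, bus_ctx, cfg);
    }

The devm_ allocation also means the copy is freed automatically on unbind, so the fix adds no new error-path bookkeeping.
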
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
index aa8b58125568..ef4268cc6227 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
588static const struct sunxi_pinctrl_desc h6_pinctrl_data = { 588static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
589 .pins = h6_pins, 589 .pins = h6_pins,
590 .npins = ARRAY_SIZE(h6_pins), 590 .npins = ARRAY_SIZE(h6_pins),
591 .irq_banks = 3, 591 .irq_banks = 4,
592 .irq_bank_map = h6_irq_bank_map, 592 .irq_bank_map = h6_irq_bank_map,
593 .irq_read_needs_mux = true, 593 .irq_read_needs_mux = true,
594}; 594};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 5d9184d18c16..0e7fa69e93df 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
698{ 698{
699 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 699 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
700 unsigned short bank = offset / PINS_PER_BANK; 700 unsigned short bank = offset / PINS_PER_BANK;
701 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; 701 unsigned short bank_offset = bank - pctl->desc->pin_base /
702 struct regulator *reg; 702 PINS_PER_BANK;
703 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
704 struct regulator *reg = s_reg->regulator;
705 char supply[16];
703 int ret; 706 int ret;
704 707
705 reg = s_reg->regulator; 708 if (reg) {
706 if (!reg) {
707 char supply[16];
708
709 snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
710 reg = regulator_get(pctl->dev, supply);
711 if (IS_ERR(reg)) {
712 dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
713 'A' + bank);
714 return PTR_ERR(reg);
715 }
716
717 s_reg->regulator = reg;
718 refcount_set(&s_reg->refcount, 1);
719 } else {
720 refcount_inc(&s_reg->refcount); 709 refcount_inc(&s_reg->refcount);
710 return 0;
711 }
712
713 snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
714 reg = regulator_get(pctl->dev, supply);
715 if (IS_ERR(reg)) {
716 dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
717 'A' + bank);
718 return PTR_ERR(reg);
721 } 719 }
722 720
723 ret = regulator_enable(reg); 721 ret = regulator_enable(reg);
@@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
727 goto out; 725 goto out;
728 } 726 }
729 727
728 s_reg->regulator = reg;
729 refcount_set(&s_reg->refcount, 1);
730
730 return 0; 731 return 0;
731 732
732out: 733out:
733 if (refcount_dec_and_test(&s_reg->refcount)) { 734 regulator_put(s_reg->regulator);
734 regulator_put(s_reg->regulator);
735 s_reg->regulator = NULL;
736 }
737 735
738 return ret; 736 return ret;
739} 737}
@@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset)
742{ 740{
743 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 741 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
744 unsigned short bank = offset / PINS_PER_BANK; 742 unsigned short bank = offset / PINS_PER_BANK;
745 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; 743 unsigned short bank_offset = bank - pctl->desc->pin_base /
744 PINS_PER_BANK;
745 struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
746 746
747 if (!refcount_dec_and_test(&s_reg->refcount)) 747 if (!refcount_dec_and_test(&s_reg->refcount))
748 return 0; 748 return 0;
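
The sunxi arithmetic deserves a worked example: on controllers whose pins do not start at 0 (pin_base > 0, e.g. the R_PIO block starting at PL0), indexing the regulator array by the absolute bank number overruns it; the fix indexes relative to the controller's first bank, which is also what lets the companion sunxi.h hunk shrink the array. A runnable check of the computation:

    #include <stdio.h>

    #define PINS_PER_BANK 32

    int main(void)
    {
            unsigned int pin_base = 352;    /* e.g. an R_PIO block: PL0 = 11 * 32 */
            unsigned int offset   = 353;    /* global pin number (PL1) */
            unsigned short bank = offset / PINS_PER_BANK;
            unsigned short bank_offset = bank - pin_base / PINS_PER_BANK;

            printf("bank=%u bank_offset=%u\n", bank, bank_offset);
            return 0;
    }

This prints bank=11 bank_offset=0: the old code would have indexed regulators[11], past the end of the now 9-element array, while the new code correctly uses the controller-relative slot 0.
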
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e340d2a24b44..034c0317c8d6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -136,7 +136,7 @@ struct sunxi_pinctrl {
136 struct gpio_chip *chip; 136 struct gpio_chip *chip;
137 const struct sunxi_pinctrl_desc *desc; 137 const struct sunxi_pinctrl_desc *desc;
138 struct device *dev; 138 struct device *dev;
139 struct sunxi_pinctrl_regulator regulators[12]; 139 struct sunxi_pinctrl_regulator regulators[9];
140 struct irq_domain *domain; 140 struct irq_domain *domain;
141 struct sunxi_pinctrl_function *functions; 141 struct sunxi_pinctrl_function *functions;
142 unsigned nfunctions; 142 unsigned nfunctions;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5e2109c54c7c..b5e9db85e881 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -905,6 +905,7 @@ config TOSHIBA_WMI
905config ACPI_CMPC 905config ACPI_CMPC
906 tristate "CMPC Laptop Extras" 906 tristate "CMPC Laptop Extras"
907 depends on ACPI && INPUT 907 depends on ACPI && INPUT
908 depends on BACKLIGHT_LCD_SUPPORT
908 depends on RFKILL || RFKILL=n 909 depends on RFKILL || RFKILL=n
909 select BACKLIGHT_CLASS_DEVICE 910 select BACKLIGHT_CLASS_DEVICE
910 help 911 help
@@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL
1128config SAMSUNG_Q10 1129config SAMSUNG_Q10
1129 tristate "Samsung Q10 Extras" 1130 tristate "Samsung Q10 Extras"
1130 depends on ACPI 1131 depends on ACPI
1132 depends on BACKLIGHT_LCD_SUPPORT
1131 select BACKLIGHT_CLASS_DEVICE 1133 select BACKLIGHT_CLASS_DEVICE
1132 ---help--- 1134 ---help---
1133 This driver provides support for backlight control on Samsung Q10 1135 This driver provides support for backlight control on Samsung Q10
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 194ffd5c8580..039b2074db7e 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
60 60
61static void __ref sclp_cpu_change_notify(struct work_struct *work) 61static void __ref sclp_cpu_change_notify(struct work_struct *work)
62{ 62{
63 lock_device_hotplug();
63 smp_rescan_cpus(); 64 smp_rescan_cpus();
65 unlock_device_hotplug();
64} 66}
65 67
66static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) 68static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cf30d124b9e..e390f8c6d5f3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
403 goto failed; 403 goto failed;
404 404
405 /* report size limit per scatter-gather segment */ 405 /* report size limit per scatter-gather segment */
406 adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
407 adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; 406 adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
408 407
409 adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; 408 adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 00acc7144bbc..f4f6a07c5222 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
428 .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) 428 .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
429 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, 429 * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
430 /* GCD, adjusted later */ 430 /* GCD, adjusted later */
431 /* report size limit per scatter-gather segment */
432 .max_segment_size = ZFCP_QDIO_SBALE_LEN,
431 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, 433 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
432 .shost_attrs = zfcp_sysfs_shost_attrs, 434 .shost_attrs = zfcp_sysfs_shost_attrs,
433 .sdev_attrs = zfcp_sysfs_sdev_attrs, 435 .sdev_attrs = zfcp_sysfs_sdev_attrs,
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 128d658d472a..16957d7ac414 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
295 if(tpnt->sdev_attrs == NULL) 295 if(tpnt->sdev_attrs == NULL)
296 tpnt->sdev_attrs = NCR_700_dev_attrs; 296 tpnt->sdev_attrs = NCR_700_dev_attrs;
297 297
298 memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, 298 memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
299 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); 299 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
300 if(memory == NULL) { 300 if(memory == NULL) {
301 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); 301 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 634ddb90e7aa..7e56a11836c1 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1747,11 +1747,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1747 shost->max_sectors = (shost->sg_tablesize * 8) + 112; 1747 shost->max_sectors = (shost->sg_tablesize * 8) + 112;
1748 } 1748 }
1749 1749
1750 error = dma_set_max_seg_size(&pdev->dev, 1750 if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
1751 (aac->adapter_info.options & AAC_OPT_NEW_COMM) ? 1751 shost->max_segment_size = shost->max_sectors << 9;
1752 (shost->max_sectors << 9) : 65536); 1752 else
1753 if (error) 1753 shost->max_segment_size = 65536;
1754 goto out_deinit;
1755 1754
1756 /* 1755 /*
1757 * Firmware printf works only with older firmware. 1756 * Firmware printf works only with older firmware.
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 74e260027c7d..76e49d902609 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3566,7 +3566,7 @@ static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
3566 3566
3567 /* if eqid_count == 1 fall back to INTX */ 3567 /* if eqid_count == 1 fall back to INTX */
3568 if (enable_msix && nvec > 1) { 3568 if (enable_msix && nvec > 1) {
3569 const struct irq_affinity desc = { .post_vectors = 1 }; 3569 struct irq_affinity desc = { .post_vectors = 1 };
3570 3570
3571 if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, 3571 if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
3572 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { 3572 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 350257c13a5b..bc9f2a2365f4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
240 return NULL; 240 return NULL;
241 } 241 }
242 242
243 cmgr->hba = hba;
243 cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), 244 cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
244 GFP_KERNEL); 245 GFP_KERNEL);
245 if (!cmgr->free_list) { 246 if (!cmgr->free_list) {
@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
256 goto mem_err; 257 goto mem_err;
257 } 258 }
258 259
259 cmgr->hba = hba;
260 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); 260 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
261 261
262 for (i = 0; i < arr_sz; i++) { 262 for (i = 0; i < arr_sz; i++) {
@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
295 295
296 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ 296 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
297 mem_size = num_ios * sizeof(struct io_bdt *); 297 mem_size = num_ios * sizeof(struct io_bdt *);
298 cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); 298 cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
299 if (!cmgr->io_bdt_pool) { 299 if (!cmgr->io_bdt_pool) {
300 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); 300 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
301 goto mem_err; 301 goto mem_err;
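
Why the bnx2fc hunk switches io_bdt_pool to kzalloc(): the mem_err unwind frees every slot of the array, and kfree(NULL) is a no-op, so a zeroed array is safe to unwind even when only partially populated; an uninitialized kmalloc() array would hand garbage pointers to kfree(). A runnable userspace model using the same property of free(NULL):

    #include <stdlib.h>

    int main(void)
    {
            int n = 8;
            void **pool = calloc(n, sizeof(*pool));   /* zeroed, like kzalloc() */

            if (!pool)
                    return 1;
            for (int i = 0; i < 3; i++) {             /* partial population */
                    pool[i] = malloc(16);
                    if (!pool[i])
                            break;
            }
            for (int i = 0; i < n; i++)               /* full unwind is safe */
                    free(pool[i]);
            free(pool);
            return 0;
    }

The same hunk's cmgr->hba move is the matching fix on the other side: the error path dereferences cmgr->hba, so it must be assigned before the first allocation that can fail.
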
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 8a004036e3d7..9bd2bd8dc2be 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable)
594 } 594 }
595 595
596 fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING); 596 fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
597 ln->fc_vport = fc_vport;
597 598
598 if (csio_fcoe_alloc_vnp(hw, ln)) 599 if (csio_fcoe_alloc_vnp(hw, ln))
599 goto error; 600 goto error;
600 601
601 *(struct csio_lnode **)fc_vport->dd_data = ln; 602 *(struct csio_lnode **)fc_vport->dd_data = ln;
602 ln->fc_vport = fc_vport;
603 if (!fc_vport->node_name) 603 if (!fc_vport->node_name)
604 fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln)); 604 fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
605 if (!fc_vport->port_name) 605 if (!fc_vport->port_name)
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index be83590ed955..ff943f477d6f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1726 fc_frame_payload_op(fp) != ELS_LS_ACC) { 1726 fc_frame_payload_op(fp) != ELS_LS_ACC) {
1727 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); 1727 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
1728 fc_lport_error(lport, fp); 1728 fc_lport_error(lport, fp);
1729 goto err; 1729 goto out;
1730 } 1730 }
1731 1731
1732 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1732 flp = fc_frame_payload_get(fp, sizeof(*flp));
1733 if (!flp) { 1733 if (!flp) {
1734 FC_LPORT_DBG(lport, "FLOGI bad response\n"); 1734 FC_LPORT_DBG(lport, "FLOGI bad response\n");
1735 fc_lport_error(lport, fp); 1735 fc_lport_error(lport, fp);
1736 goto err; 1736 goto out;
1737 } 1737 }
1738 1738
1739 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1739 mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1743 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " 1743 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
1744 "lport->mfs:%hu\n", mfs, lport->mfs); 1744 "lport->mfs:%hu\n", mfs, lport->mfs);
1745 fc_lport_error(lport, fp); 1745 fc_lport_error(lport, fp);
1746 goto err; 1746 goto out;
1747 } 1747 }
1748 1748
1749 if (mfs <= lport->mfs) { 1749 if (mfs <= lport->mfs) {
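
The libfc hunks reroute three FLOGI error exits from "goto err" to "goto out". In fc_lport_flogi_resp() the out label (just outside the quoted context) frees the received frame before falling into the unlock that err performs, so a rejected or malformed response no longer leaks the skb. A sketch of that label layout, with hypothetical names:

#include <linux/mutex.h>

struct frame;
bool frame_valid(struct frame *fp);	/* hypothetical helpers */
void consume(struct frame *fp);
void frame_free(struct frame *fp);

static void resp_handler(struct mutex *lock, struct frame *fp)
{
	mutex_lock(lock);
	if (!frame_valid(fp))
		goto out;		/* we still own fp: take the free */
	consume(fp);
out:
	frame_free(fp);			/* the one place fp is dropped */
	mutex_unlock(lock);		/* an err label here would skip the free */
}
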
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 4c66b19e6199..8c9f79042228 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
297 lport); 297 lport);
298 298
299 /* release any threads waiting for the unreg to complete */ 299 /* release any threads waiting for the unreg to complete */
300 complete(&lport->lport_unreg_done); 300 if (lport->vport->localport)
301 complete(lport->lport_unreg_cmp);
301} 302}
302 303
303/* lpfc_nvme_remoteport_delete 304/* lpfc_nvme_remoteport_delete
@@ -2545,7 +2546,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
2545 */ 2546 */
2546void 2547void
2547lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, 2548lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2548 struct lpfc_nvme_lport *lport) 2549 struct lpfc_nvme_lport *lport,
2550 struct completion *lport_unreg_cmp)
2549{ 2551{
2550#if (IS_ENABLED(CONFIG_NVME_FC)) 2552#if (IS_ENABLED(CONFIG_NVME_FC))
2551 u32 wait_tmo; 2553 u32 wait_tmo;
@@ -2557,8 +2559,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2557 */ 2559 */
2558 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); 2560 wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2559 while (true) { 2561 while (true) {
2560 ret = wait_for_completion_timeout(&lport->lport_unreg_done, 2562 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2561 wait_tmo);
2562 if (unlikely(!ret)) { 2563 if (unlikely(!ret)) {
2563 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, 2564 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
2564 "6176 Lport %p Localport %p wait " 2565 "6176 Lport %p Localport %p wait "
@@ -2592,12 +2593,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2592 struct lpfc_nvme_lport *lport; 2593 struct lpfc_nvme_lport *lport;
2593 struct lpfc_nvme_ctrl_stat *cstat; 2594 struct lpfc_nvme_ctrl_stat *cstat;
2594 int ret; 2595 int ret;
2596 DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2595 2597
2596 if (vport->nvmei_support == 0) 2598 if (vport->nvmei_support == 0)
2597 return; 2599 return;
2598 2600
2599 localport = vport->localport; 2601 localport = vport->localport;
2600 vport->localport = NULL;
2601 lport = (struct lpfc_nvme_lport *)localport->private; 2602 lport = (struct lpfc_nvme_lport *)localport->private;
2602 cstat = lport->cstat; 2603 cstat = lport->cstat;
2603 2604
@@ -2608,13 +2609,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2608 /* lport's rport list is clear. Unregister 2609 /* lport's rport list is clear. Unregister
2609 * lport and release resources. 2610 * lport and release resources.
2610 */ 2611 */
2611 init_completion(&lport->lport_unreg_done); 2612 lport->lport_unreg_cmp = &lport_unreg_cmp;
2612 ret = nvme_fc_unregister_localport(localport); 2613 ret = nvme_fc_unregister_localport(localport);
2613 2614
2614 /* Wait for completion. This either blocks 2615 /* Wait for completion. This either blocks
2615 * indefinitely or succeeds 2616 * indefinitely or succeeds
2616 */ 2617 */
2617 lpfc_nvme_lport_unreg_wait(vport, lport); 2618 lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2619 vport->localport = NULL;
2618 kfree(cstat); 2620 kfree(cstat);
2619 2621
2620 /* Regardless of the unregister upcall response, clear 2622 /* Regardless of the unregister upcall response, clear
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cfd4719be25c..b234d0298994 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat {
50/* Declare nvme-based local and remote port definitions. */ 50/* Declare nvme-based local and remote port definitions. */
51struct lpfc_nvme_lport { 51struct lpfc_nvme_lport {
52 struct lpfc_vport *vport; 52 struct lpfc_vport *vport;
53 struct completion lport_unreg_done; 53 struct completion *lport_unreg_cmp;
54 /* Add stats counters here */ 54 /* Add stats counters here */
55 struct lpfc_nvme_ctrl_stat *cstat; 55 struct lpfc_nvme_ctrl_stat *cstat;
56 atomic_t fc4NvmeLsRequests; 56 atomic_t fc4NvmeLsRequests;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 6245f442d784..95fee83090eb 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1003 struct lpfc_nvmet_tgtport *tport = targetport->private; 1003 struct lpfc_nvmet_tgtport *tport = targetport->private;
1004 1004
1005 /* release any threads waiting for the unreg to complete */ 1005 /* release any threads waiting for the unreg to complete */
1006 complete(&tport->tport_unreg_done); 1006 if (tport->phba->targetport)
1007 complete(tport->tport_unreg_cmp);
1007} 1008}
1008 1009
1009static void 1010static void
@@ -1692,6 +1693,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1692 struct lpfc_nvmet_tgtport *tgtp; 1693 struct lpfc_nvmet_tgtport *tgtp;
1693 struct lpfc_queue *wq; 1694 struct lpfc_queue *wq;
1694 uint32_t qidx; 1695 uint32_t qidx;
1696 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1695 1697
1696 if (phba->nvmet_support == 0) 1698 if (phba->nvmet_support == 0)
1697 return; 1699 return;
@@ -1701,9 +1703,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1701 wq = phba->sli4_hba.nvme_wq[qidx]; 1703 wq = phba->sli4_hba.nvme_wq[qidx];
1702 lpfc_nvmet_wqfull_flush(phba, wq, NULL); 1704 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1703 } 1705 }
1704 init_completion(&tgtp->tport_unreg_done); 1706 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1705 nvmet_fc_unregister_targetport(phba->targetport); 1707 nvmet_fc_unregister_targetport(phba->targetport);
1706 wait_for_completion_timeout(&tgtp->tport_unreg_done, 5); 1708 wait_for_completion_timeout(&tport_unreg_cmp, 5);
1707 lpfc_nvmet_cleanup_io_context(phba); 1709 lpfc_nvmet_cleanup_io_context(phba);
1708 } 1710 }
1709 phba->targetport = NULL; 1711 phba->targetport = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 1aaff63f1f41..0ec1082ce7ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -34,7 +34,7 @@
34/* Used for NVME Target */ 34/* Used for NVME Target */
35struct lpfc_nvmet_tgtport { 35struct lpfc_nvmet_tgtport {
36 struct lpfc_hba *phba; 36 struct lpfc_hba *phba;
37 struct completion tport_unreg_done; 37 struct completion *tport_unreg_cmp;
38 38
39 /* Stats counters - lpfc_nvmet_unsol_ls_buffer */ 39 /* Stats counters - lpfc_nvmet_unsol_ls_buffer */
40 atomic_t rcv_ls_req_in; 40 atomic_t rcv_ls_req_in;
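
All four lpfc hunks above are one fix: the unregister completion used to live inside the lport/tport private area, which the NVMe transport frees once the unregister finishes, so a late _delete callback could signal freed memory. Moving the completion onto the waiter's stack and leaving only a pointer in the object inverts the ownership. A minimal sketch of the handoff, with hypothetical names (start_unregister() stands in for the transport call):

#include <linux/completion.h>
#include <linux/jiffies.h>

struct port {
	struct completion *unreg_cmp;	/* points at the waiter's stack */
};
void start_unregister(struct port *p);	/* eventually runs port_delete_cb() */

static void port_delete_cb(struct port *p)	/* async callback */
{
	if (p->unreg_cmp)
		complete(p->unreg_cmp);
}

static void port_destroy(struct port *p)
{
	DECLARE_COMPLETION_ONSTACK(done);

	p->unreg_cmp = &done;
	start_unregister(p);
	/* the stack completion outlives the object's teardown */
	wait_for_completion_timeout(&done, msecs_to_jiffies(5000));
}
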
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 661512bec3ac..e27f4df24021 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -62,7 +62,7 @@
62 62
63/* make sure inq_product_rev string corresponds to this version */ 63/* make sure inq_product_rev string corresponds to this version */
64#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ 64#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
65static const char *sdebug_version_date = "20180128"; 65static const char *sdebug_version_date = "20190125";
66 66
67#define MY_NAME "scsi_debug" 67#define MY_NAME "scsi_debug"
68 68
@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
735 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); 735 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
736} 736}
737 737
738static void *fake_store(unsigned long long lba) 738static void *lba2fake_store(unsigned long long lba)
739{ 739{
740 lba = do_div(lba, sdebug_store_sectors); 740 lba = do_div(lba, sdebug_store_sectors);
741 741
@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
2514 return ret; 2514 return ret;
2515} 2515}
2516 2516
2517/* If fake_store(lba,num) compares equal to arr(num), then copy top half of 2517/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
2518 * arr into fake_store(lba,num) and return true. If comparison fails then 2518 * arr into lba2fake_store(lba,num) and return true. If comparison fails then
2519 * return false. */ 2519 * return false. */
2520static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) 2520static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
2521{ 2521{
@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2643 if (sdt->app_tag == cpu_to_be16(0xffff)) 2643 if (sdt->app_tag == cpu_to_be16(0xffff))
2644 continue; 2644 continue;
2645 2645
2646 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); 2646 ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
2647 if (ret) { 2647 if (ret) {
2648 dif_errors++; 2648 dif_errors++;
2649 return ret; 2649 return ret;
@@ -3261,10 +3261,12 @@ err_out:
3261static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, 3261static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3262 u32 ei_lba, bool unmap, bool ndob) 3262 u32 ei_lba, bool unmap, bool ndob)
3263{ 3263{
3264 int ret;
3264 unsigned long iflags; 3265 unsigned long iflags;
3265 unsigned long long i; 3266 unsigned long long i;
3266 int ret; 3267 u32 lb_size = sdebug_sector_size;
3267 u64 lba_off; 3268 u64 block, lbaa;
3269 u8 *fs1p;
3268 3270
3269 ret = check_device_access_params(scp, lba, num); 3271 ret = check_device_access_params(scp, lba, num);
3270 if (ret) 3272 if (ret)
@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3276 unmap_region(lba, num); 3278 unmap_region(lba, num);
3277 goto out; 3279 goto out;
3278 } 3280 }
3279 3281 lbaa = lba;
3280 lba_off = lba * sdebug_sector_size; 3282 block = do_div(lbaa, sdebug_store_sectors);
3281 /* if ndob then zero 1 logical block, else fetch 1 logical block */ 3283 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3284 fs1p = fake_storep + (block * lb_size);
3282 if (ndob) { 3285 if (ndob) {
3283 memset(fake_storep + lba_off, 0, sdebug_sector_size); 3286 memset(fs1p, 0, lb_size);
3284 ret = 0; 3287 ret = 0;
3285 } else 3288 } else
3286 ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, 3289 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3287 sdebug_sector_size);
3288 3290
3289 if (-1 == ret) { 3291 if (-1 == ret) {
3290 write_unlock_irqrestore(&atomic_rw, iflags); 3292 write_unlock_irqrestore(&atomic_rw, iflags);
3291 return DID_ERROR << 16; 3293 return DID_ERROR << 16;
3292 } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) 3294 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3293 sdev_printk(KERN_INFO, scp->device, 3295 sdev_printk(KERN_INFO, scp->device,
3294 "%s: %s: lb size=%u, IO sent=%d bytes\n", 3296 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3295 my_name, "write same", 3297 my_name, "write same", lb_size, ret);
3296 sdebug_sector_size, ret);
3297 3298
3298 /* Copy first sector to remaining blocks */ 3299 /* Copy first sector to remaining blocks */
3299 for (i = 1 ; i < num ; i++) 3300 for (i = 1 ; i < num ; i++) {
3300 memcpy(fake_storep + ((lba + i) * sdebug_sector_size), 3301 lbaa = lba + i;
3301 fake_storep + lba_off, 3302 block = do_div(lbaa, sdebug_store_sectors);
3302 sdebug_sector_size); 3303 memmove(fake_storep + (block * lb_size), fs1p, lb_size);
3303 3304 }
3304 if (scsi_debug_lbp()) 3305 if (scsi_debug_lbp())
3305 map_region(lba, num); 3306 map_region(lba, num);
3306out: 3307out:
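
Two details of the resp_write_same() rewrite are easy to miss. First, do_div(x, n) divides the u64 x in place, leaving the quotient in x and returning the remainder as a u32, which is why lba is copied into the scratch lbaa before each call. Second, the copy loop becomes memmove() because once the LBA wraps modulo sdebug_store_sectors the destination block can overlap the source block. A small sketch of the wrapping step:

#include <asm/div64.h>		/* do_div() */

/* Map a logical block address onto a store of 'sectors' blocks. */
static u32 wrap_block(u64 lba, u32 sectors)
{
	u64 q = lba;			/* scratch: do_div() rewrites it */
	u32 rem = do_div(q, sectors);	/* q is now lba / sectors */

	return rem;			/* lba % sectors */
}
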
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b13cc9288ba0..6d65ac584eba 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1842,8 +1842,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
1842 blk_queue_segment_boundary(q, shost->dma_boundary); 1842 blk_queue_segment_boundary(q, shost->dma_boundary);
1843 dma_set_seg_boundary(dev, shost->dma_boundary); 1843 dma_set_seg_boundary(dev, shost->dma_boundary);
1844 1844
1845 blk_queue_max_segment_size(q, 1845 blk_queue_max_segment_size(q, shost->max_segment_size);
1846 min(shost->max_segment_size, dma_get_max_seg_size(dev))); 1846 dma_set_max_seg_size(dev, shost->max_segment_size);
1847 1847
1848 /* 1848 /*
1849 * Set a reasonable default alignment: The larger of 32-byte (dword), 1849 * Set a reasonable default alignment: The larger of 32-byte (dword),
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 71334aaf1447..2ddf24466a62 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -108,13 +108,19 @@
108int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, 108int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
109 const char *prefix) 109 const char *prefix)
110{ 110{
111 u8 *regs; 111 u32 *regs;
112 size_t pos;
113
114 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
115 return -EINVAL;
112 116
113 regs = kzalloc(len, GFP_KERNEL); 117 regs = kzalloc(len, GFP_KERNEL);
114 if (!regs) 118 if (!regs)
115 return -ENOMEM; 119 return -ENOMEM;
116 120
117 memcpy_fromio(regs, hba->mmio_base + offset, len); 121 for (pos = 0; pos < len; pos += 4)
122 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
123
118 ufshcd_hex_dump(prefix, regs, len); 124 ufshcd_hex_dump(prefix, regs, len);
119 kfree(regs); 125 kfree(regs);
120 126
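
The ufshcd_dump_regs() change replaces memcpy_fromio(), which makes no promise about access size and may issue byte reads, with one readl() per register; some UFS controllers only tolerate aligned 32-bit MMIO, hence the new offset/len alignment check. The same pattern against a hypothetical register block:

#include <linux/io.h>
#include <linux/slab.h>

static u32 *snapshot_regs(void __iomem *base, size_t len)
{
	u32 *buf;
	size_t pos;

	if (len % 4)			/* readl() wants 32-bit alignment */
		return NULL;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return NULL;
	for (pos = 0; pos < len; pos += 4)
		buf[pos / 4] = readl(base + pos);
	return buf;			/* caller kfree()s the snapshot */
}
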
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a0802de8c3a1..6f5afab7c1a1 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -248,10 +248,10 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
248 struct ion_dma_buf_attachment *a = attachment->priv; 248 struct ion_dma_buf_attachment *a = attachment->priv;
249 struct ion_buffer *buffer = dmabuf->priv; 249 struct ion_buffer *buffer = dmabuf->priv;
250 250
251 free_duped_table(a->table);
252 mutex_lock(&buffer->lock); 251 mutex_lock(&buffer->lock);
253 list_del(&a->list); 252 list_del(&a->list);
254 mutex_unlock(&buffer->lock); 253 mutex_unlock(&buffer->lock);
254 free_duped_table(a->table);
255 255
256 kfree(a); 256 kfree(a);
257} 257}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 28cbd6b3d26c..dfee6985efa6 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
35 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 35 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
36 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 36 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
37 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ 37 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
38 {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
38 {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ 39 {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
39 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ 40 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
40 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ 41 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index bcc8dfa8e672..9efb4dcb9d3a 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -850,18 +850,18 @@ enum ieee80211_state {
850#define IP_FMT "%pI4" 850#define IP_FMT "%pI4"
851#define IP_ARG(x) (x) 851#define IP_ARG(x) (x)
852 852
853extern __inline int is_multicast_mac_addr(const u8 *addr) 853static inline int is_multicast_mac_addr(const u8 *addr)
854{ 854{
855 return ((addr[0] != 0xff) && (0x01 & addr[0])); 855 return ((addr[0] != 0xff) && (0x01 & addr[0]));
856} 856}
857 857
858extern __inline int is_broadcast_mac_addr(const u8 *addr) 858static inline int is_broadcast_mac_addr(const u8 *addr)
859{ 859{
860 return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \ 860 return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
861 (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff)); 861 (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
862} 862}
863 863
864extern __inline int is_zero_mac_addr(const u8 *addr) 864static inline int is_zero_mac_addr(const u8 *addr)
865{ 865{
866 return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \ 866 return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
867 (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00)); 867 (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 9e17ec651bde..53f5a1cb4636 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -446,6 +446,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
446static inline void 446static inline void
447remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) 447remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
448{ 448{
449 event->fired = 1;
449 event->armed = 0; 450 event->armed = 0;
450 wake_up_all(wq); 451 wake_up_all(wq);
451} 452}
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 70c854d939ce..3d0badc34825 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -36,7 +36,7 @@ struct wilc_op_mode {
36struct wilc_reg_frame { 36struct wilc_reg_frame {
37 bool reg; 37 bool reg;
38 u8 reg_id; 38 u8 reg_id;
39 __le32 frame_type; 39 __le16 frame_type;
40} __packed; 40} __packed;
41 41
42struct wilc_drv_handler { 42struct wilc_drv_handler {
@@ -1744,7 +1744,6 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
1744 result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, 1744 result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
1745 ARRAY_SIZE(wid_list), 1745 ARRAY_SIZE(wid_list),
1746 wilc_get_vif_idx(vif)); 1746 wilc_get_vif_idx(vif));
1747 kfree(gtk_key);
1748 } else if (mode == WILC_STATION_MODE) { 1747 } else if (mode == WILC_STATION_MODE) {
1749 struct wid wid; 1748 struct wid wid;
1750 1749
@@ -1754,9 +1753,9 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
1754 wid.val = (u8 *)gtk_key; 1753 wid.val = (u8 *)gtk_key;
1755 result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1, 1754 result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
1756 wilc_get_vif_idx(vif)); 1755 wilc_get_vif_idx(vif));
1757 kfree(gtk_key);
1758 } 1756 }
1759 1757
1758 kfree(gtk_key);
1760 return result; 1759 return result;
1761} 1760}
1762 1761
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 3c5e9e030cad..489e5a5038f8 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -1252,21 +1252,22 @@ static u32 init_chip(struct net_device *dev)
1252 ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg); 1252 ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
1253 if (!ret) { 1253 if (!ret) {
1254 netdev_err(dev, "fail read reg 0x1118\n"); 1254 netdev_err(dev, "fail read reg 0x1118\n");
1255 return ret; 1255 goto release;
1256 } 1256 }
1257 reg |= BIT(0); 1257 reg |= BIT(0);
1258 ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg); 1258 ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
1259 if (!ret) { 1259 if (!ret) {
1260 netdev_err(dev, "fail write reg 0x1118\n"); 1260 netdev_err(dev, "fail write reg 0x1118\n");
1261 return ret; 1261 goto release;
1262 } 1262 }
1263 ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71); 1263 ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
1264 if (!ret) { 1264 if (!ret) {
1265 netdev_err(dev, "fail write reg 0xc0000\n"); 1265 netdev_err(dev, "fail write reg 0xc0000\n");
1266 return ret; 1266 goto release;
1267 } 1267 }
1268 } 1268 }
1269 1269
1270release:
1270 release_bus(wilc, WILC_BUS_RELEASE_ONLY); 1271 release_bus(wilc, WILC_BUS_RELEASE_ONLY);
1271 1272
1272 return ret; 1273 return ret;
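
In init_chip() the bus is claimed before the quoted context starts, so the old "return ret" exits dropped out with the bus still held whenever a register access failed; the fix funnels every failure through the release label. The shape of the unwind, with hypothetical helpers (note the driver's hif accessors return nonzero on success, so !ret is the failure test):

struct bus;
void acquire_bus(struct bus *b);
void release_bus(struct bus *b);
int read_reg(struct bus *b, unsigned int addr);		/* nonzero on success */
int write_reg(struct bus *b, unsigned int addr, unsigned int val);

static int chip_setup(struct bus *bus)
{
	int ret;

	acquire_bus(bus);
	ret = read_reg(bus, 0x1118);
	if (!ret)
		goto release;		/* was: return ret (bus stayed held) */
	ret = write_reg(bus, 0x1118, 1);
	if (!ret)
		goto release;
release:
	release_bus(bus);
	return ret;
}
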
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c34c88ef3319..5831e0eecea1 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1317,12 +1317,13 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
1317 * target_complete_cmd will translate this to LUN COMM FAILURE 1317 * target_complete_cmd will translate this to LUN COMM FAILURE
1318 */ 1318 */
1319 scsi_status = SAM_STAT_CHECK_CONDITION; 1319 scsi_status = SAM_STAT_CHECK_CONDITION;
1320 list_del_init(&cmd->queue_entry);
1320 } else { 1321 } else {
1322 list_del_init(&cmd->queue_entry);
1321 idr_remove(&udev->commands, id); 1323 idr_remove(&udev->commands, id);
1322 tcmu_free_cmd(cmd); 1324 tcmu_free_cmd(cmd);
1323 scsi_status = SAM_STAT_TASK_SET_FULL; 1325 scsi_status = SAM_STAT_TASK_SET_FULL;
1324 } 1326 }
1325 list_del_init(&cmd->queue_entry);
1326 1327
1327 pr_debug("Timing out cmd %u on dev %s that is %s.\n", 1328 pr_debug("Timing out cmd %u on dev %s that is %s.\n",
1328 id, udev->name, is_running ? "inflight" : "queued"); 1329 id, udev->name, is_running ? "inflight" : "queued");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 284cf2c5a8fd..8e1cf4d789be 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
84 struct pci_dev *pci_dev; \ 84 struct pci_dev *pci_dev; \
85 struct platform_device *pdev; \ 85 struct platform_device *pdev; \
86 struct proc_thermal_device *proc_dev; \ 86 struct proc_thermal_device *proc_dev; \
87\ 87 \
88 if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
89 dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
90 return 0; \
91 } \
92 \
88 if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ 93 if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
89 pdev = to_platform_device(dev); \ 94 pdev = to_platform_device(dev); \
90 proc_dev = platform_get_drvdata(pdev); \ 95 proc_dev = platform_get_drvdata(pdev); \
@@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev,
298 *priv = proc_priv; 303 *priv = proc_priv;
299 304
300 ret = proc_thermal_read_ppcc(proc_priv); 305 ret = proc_thermal_read_ppcc(proc_priv);
301 if (!ret) {
302 ret = sysfs_create_group(&dev->kobj,
303 &power_limit_attribute_group);
304
305 }
306 if (ret) 306 if (ret)
307 return ret; 307 return ret;
308 308
@@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev,
316 316
317 proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); 317 proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
318 if (IS_ERR(proc_priv->int340x_zone)) { 318 if (IS_ERR(proc_priv->int340x_zone)) {
319 ret = PTR_ERR(proc_priv->int340x_zone); 319 return PTR_ERR(proc_priv->int340x_zone);
320 goto remove_group;
321 } else 320 } else
322 ret = 0; 321 ret = 0;
323 322
@@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev,
331 330
332remove_zone: 331remove_zone:
333 int340x_thermal_zone_remove(proc_priv->int340x_zone); 332 int340x_thermal_zone_remove(proc_priv->int340x_zone);
334remove_group:
335 sysfs_remove_group(&proc_priv->dev->kobj,
336 &power_limit_attribute_group);
337 333
338 return ret; 334 return ret;
339} 335}
@@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev)
364 platform_set_drvdata(pdev, proc_priv); 360 platform_set_drvdata(pdev, proc_priv);
365 proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; 361 proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
366 362
367 return 0; 363 dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
364
365 return sysfs_create_group(&pdev->dev.kobj,
366 &power_limit_attribute_group);
368} 367}
369 368
370static int int3401_remove(struct platform_device *pdev) 369static int int3401_remove(struct platform_device *pdev)
@@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
423 proc_priv->soc_dts = intel_soc_dts_iosf_init( 422 proc_priv->soc_dts = intel_soc_dts_iosf_init(
424 INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0); 423 INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
425 424
426 if (proc_priv->soc_dts && pdev->irq) { 425 if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
427 ret = pci_enable_msi(pdev); 426 ret = pci_enable_msi(pdev);
428 if (!ret) { 427 if (!ret) {
429 ret = request_threaded_irq(pdev->irq, NULL, 428 ret = request_threaded_irq(pdev->irq, NULL,
@@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
441 dev_err(&pdev->dev, "No auxiliary DTSs enabled\n"); 440 dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
442 } 441 }
443 442
444 return 0; 443 dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
444
445 return sysfs_create_group(&pdev->dev.kobj,
446 &power_limit_attribute_group);
445} 447}
446 448
447static void proc_thermal_pci_remove(struct pci_dev *pdev) 449static void proc_thermal_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 4164414d4c64..8bdf42bc8fc8 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
597 /* too large for caller's buffer */ 597 /* too large for caller's buffer */
598 ret = -EOVERFLOW; 598 ret = -EOVERFLOW;
599 } else { 599 } else {
600 __set_current_state(TASK_RUNNING);
600 if (copy_to_user(buf, rbuf->buf, rbuf->count)) 601 if (copy_to_user(buf, rbuf->buf, rbuf->count))
601 ret = -EFAULT; 602 ret = -EFAULT;
602 else 603 else
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 189ab1212d9a..e441221e04b9 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1070,15 +1070,16 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
1070 1070
1071 ret = 0; 1071 ret = 0;
1072 } 1072 }
1073 }
1074 1073
1075 /* Initialise interrupt backoff work if required */ 1074 /* Initialise interrupt backoff work if required */
1076 if (up->overrun_backoff_time_ms > 0) { 1075 if (up->overrun_backoff_time_ms > 0) {
1077 uart->overrun_backoff_time_ms = up->overrun_backoff_time_ms; 1076 uart->overrun_backoff_time_ms =
1078 INIT_DELAYED_WORK(&uart->overrun_backoff, 1077 up->overrun_backoff_time_ms;
1079 serial_8250_overrun_backoff_work); 1078 INIT_DELAYED_WORK(&uart->overrun_backoff,
1080 } else { 1079 serial_8250_overrun_backoff_work);
1081 uart->overrun_backoff_time_ms = 0; 1080 } else {
1081 uart->overrun_backoff_time_ms = 0;
1082 }
1082 } 1083 }
1083 1084
1084 mutex_unlock(&serial_mutex); 1085 mutex_unlock(&serial_mutex);
diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c
index e1a551aae336..ce81523c3113 100644
--- a/drivers/tty/serial/earlycon-riscv-sbi.c
+++ b/drivers/tty/serial/earlycon-riscv-sbi.c
@@ -10,13 +10,16 @@
10#include <linux/serial_core.h> 10#include <linux/serial_core.h>
11#include <asm/sbi.h> 11#include <asm/sbi.h>
12 12
13static void sbi_console_write(struct console *con, 13static void sbi_putc(struct uart_port *port, int c)
14 const char *s, unsigned int n)
15{ 14{
16 int i; 15 sbi_console_putchar(c);
16}
17 17
18 for (i = 0; i < n; ++i) 18static void sbi_console_write(struct console *con,
19 sbi_console_putchar(s[i]); 19 const char *s, unsigned n)
20{
21 struct earlycon_device *dev = con->data;
22 uart_console_write(&dev->port, s, n, sbi_putc);
20} 23}
21 24
22static int __init early_sbi_setup(struct earlycon_device *device, 25static int __init early_sbi_setup(struct earlycon_device *device,
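
The earlycon rework stops open-coding the output loop: uart_console_write() walks the buffer, calls the supplied per-character callback for each byte, and emits a carriage return before every '\n', so the SBI console picks up standard newline handling instead of sending raw bytes. Any single-character sink can be adapted the same way (hw_emit_byte() is a hypothetical stand-in for sbi_console_putchar()):

#include <linux/console.h>
#include <linux/serial_core.h>

void hw_emit_byte(int c);		/* hypothetical character sink */

static void demo_putc(struct uart_port *port, int c)
{
	hw_emit_byte(c);
}

static void demo_write(struct console *con, const char *s, unsigned int n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, demo_putc);
}
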
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 241a48e5052c..debdd1b9e01a 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1697,7 +1697,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
1697 } 1697 }
1698 1698
1699 /* ask the core to calculate the divisor */ 1699 /* ask the core to calculate the divisor */
1700 baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); 1700 baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
1701 1701
1702 spin_lock_irqsave(&sport->port.lock, flags); 1702 spin_lock_irqsave(&sport->port.lock, flags);
1703 1703
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index a72d6d9fb983..38016609c7fa 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -225,7 +225,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
225 unsigned int mctrl = TIOCM_DSR | TIOCM_CAR; 225 unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
226 u32 geni_ios; 226 u32 geni_ios;
227 227
228 if (uart_console(uport) || !uart_cts_enabled(uport)) { 228 if (uart_console(uport)) {
229 mctrl |= TIOCM_CTS; 229 mctrl |= TIOCM_CTS;
230 } else { 230 } else {
231 geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS); 231 geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
@@ -241,7 +241,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
241{ 241{
242 u32 uart_manual_rfr = 0; 242 u32 uart_manual_rfr = 0;
243 243
244 if (uart_console(uport) || !uart_cts_enabled(uport)) 244 if (uart_console(uport))
245 return; 245 return;
246 246
247 if (!(mctrl & TIOCM_RTS)) 247 if (!(mctrl & TIOCM_RTS))
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d4cca5bdaf1c..5c01bb6d1c24 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -550,10 +550,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
550 int ret = 0; 550 int ret = 0;
551 551
552 circ = &state->xmit; 552 circ = &state->xmit;
553 if (!circ->buf) 553 port = uart_port_lock(state, flags);
554 if (!circ->buf) {
555 uart_port_unlock(port, flags);
554 return 0; 556 return 0;
557 }
555 558
556 port = uart_port_lock(state, flags);
557 if (port && uart_circ_chars_free(circ) != 0) { 559 if (port && uart_circ_chars_free(circ) != 0) {
558 circ->buf[circ->head] = c; 560 circ->buf[circ->head] = c;
559 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); 561 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
@@ -586,11 +588,13 @@ static int uart_write(struct tty_struct *tty,
586 return -EL3HLT; 588 return -EL3HLT;
587 } 589 }
588 590
591 port = uart_port_lock(state, flags);
589 circ = &state->xmit; 592 circ = &state->xmit;
590 if (!circ->buf) 593 if (!circ->buf) {
594 uart_port_unlock(port, flags);
591 return 0; 595 return 0;
596 }
592 597
593 port = uart_port_lock(state, flags);
594 while (port) { 598 while (port) {
595 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); 599 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
596 if (count < c) 600 if (count < c)
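
Both serial_core hunks cure the same race: uart_shutdown() frees state->xmit.buf under the port lock, so testing the pointer before taking that lock can see a buffer that is gone by the time it is used. The fix is lock first, test second, with an unlock on the empty exit. The shape, using the file-local uart_port_lock()/uart_port_unlock() helpers as in the patch:

static int put_char_locked(struct uart_state *state, unsigned char c)
{
	unsigned long flags;
	struct uart_port *port;

	port = uart_port_lock(state, flags);
	if (!state->xmit.buf) {		/* stable only under the lock */
		uart_port_unlock(port, flags);
		return 0;
	}
	/* ... queue c into state->xmit here ... */
	uart_port_unlock(port, flags);
	return 1;
}
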
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 23c6fd238422..21ffcce16927 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2189,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
2189 ld = tty_ldisc_ref_wait(tty); 2189 ld = tty_ldisc_ref_wait(tty);
2190 if (!ld) 2190 if (!ld)
2191 return -EIO; 2191 return -EIO;
2192 ld->ops->receive_buf(tty, &ch, &mbz, 1); 2192 if (ld->ops->receive_buf)
2193 ld->ops->receive_buf(tty, &ch, &mbz, 1);
2193 tty_ldisc_deref(ld); 2194 tty_ldisc_deref(ld);
2194 return 0; 2195 return 0;
2195} 2196}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 41ec8e5010f3..bba75560d11e 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1272 if (con_is_visible(vc)) 1272 if (con_is_visible(vc))
1273 update_screen(vc); 1273 update_screen(vc);
1274 vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); 1274 vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
1275 notify_update(vc);
1275 return err; 1276 return err;
1276} 1277}
1277 1278
@@ -2764,8 +2765,8 @@ rescan_last_byte:
2764 con_flush(vc, draw_from, draw_to, &draw_x); 2765 con_flush(vc, draw_from, draw_to, &draw_x);
2765 vc_uniscr_debug_check(vc); 2766 vc_uniscr_debug_check(vc);
2766 console_conditional_schedule(); 2767 console_conditional_schedule();
2767 console_unlock();
2768 notify_update(vc); 2768 notify_update(vc);
2769 console_unlock();
2769 return n; 2770 return n;
2770} 2771}
2771 2772
@@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2884 unsigned char c; 2885 unsigned char c;
2885 static DEFINE_SPINLOCK(printing_lock); 2886 static DEFINE_SPINLOCK(printing_lock);
2886 const ushort *start; 2887 const ushort *start;
2887 ushort cnt = 0; 2888 ushort start_x, cnt;
2888 ushort myx;
2889 int kmsg_console; 2889 int kmsg_console;
2890 2890
2891 /* console busy or not yet initialized */ 2891 /* console busy or not yet initialized */
@@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2898 if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) 2898 if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
2899 vc = vc_cons[kmsg_console - 1].d; 2899 vc = vc_cons[kmsg_console - 1].d;
2900 2900
2901 /* read `x' only after setting currcons properly (otherwise
2902 the `x' macro will read the x of the foreground console). */
2903 myx = vc->vc_x;
2904
2905 if (!vc_cons_allocated(fg_console)) { 2901 if (!vc_cons_allocated(fg_console)) {
2906 /* impossible */ 2902 /* impossible */
2907 /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ 2903 /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
@@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2916 hide_cursor(vc); 2912 hide_cursor(vc);
2917 2913
2918 start = (ushort *)vc->vc_pos; 2914 start = (ushort *)vc->vc_pos;
2919 2915 start_x = vc->vc_x;
2920 /* Contrived structure to try to emulate original need_wrap behaviour 2916 cnt = 0;
2921 * Problems caused when we have need_wrap set on '\n' character */
2922 while (count--) { 2917 while (count--) {
2923 c = *b++; 2918 c = *b++;
2924 if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { 2919 if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
2925 if (cnt > 0) { 2920 if (cnt && con_is_visible(vc))
2926 if (con_is_visible(vc)) 2921 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2927 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); 2922 cnt = 0;
2928 vc->vc_x += cnt;
2929 if (vc->vc_need_wrap)
2930 vc->vc_x--;
2931 cnt = 0;
2932 }
2933 if (c == 8) { /* backspace */ 2923 if (c == 8) { /* backspace */
2934 bs(vc); 2924 bs(vc);
2935 start = (ushort *)vc->vc_pos; 2925 start = (ushort *)vc->vc_pos;
2936 myx = vc->vc_x; 2926 start_x = vc->vc_x;
2937 continue; 2927 continue;
2938 } 2928 }
2939 if (c != 13) 2929 if (c != 13)
2940 lf(vc); 2930 lf(vc);
2941 cr(vc); 2931 cr(vc);
2942 start = (ushort *)vc->vc_pos; 2932 start = (ushort *)vc->vc_pos;
2943 myx = vc->vc_x; 2933 start_x = vc->vc_x;
2944 if (c == 10 || c == 13) 2934 if (c == 10 || c == 13)
2945 continue; 2935 continue;
2946 } 2936 }
2937 vc_uniscr_putc(vc, c);
2947 scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); 2938 scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
2948 notify_write(vc, c); 2939 notify_write(vc, c);
2949 cnt++; 2940 cnt++;
2950 if (myx == vc->vc_cols - 1) { 2941 if (vc->vc_x == vc->vc_cols - 1) {
2951 vc->vc_need_wrap = 1;
2952 continue;
2953 }
2954 vc->vc_pos += 2;
2955 myx++;
2956 }
2957 if (cnt > 0) {
2958 if (con_is_visible(vc))
2959 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
2960 vc->vc_x += cnt;
2961 if (vc->vc_x == vc->vc_cols) {
2962 vc->vc_x--;
2963 vc->vc_need_wrap = 1; 2942 vc->vc_need_wrap = 1;
2943 } else {
2944 vc->vc_pos += 2;
2945 vc->vc_x++;
2964 } 2946 }
2965 } 2947 }
2948 if (cnt && con_is_visible(vc))
2949 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2966 set_cursor(vc); 2950 set_cursor(vc);
2967 notify_update(vc); 2951 notify_update(vc);
2968 2952
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index e81de9ca8729..9b45aa422e69 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -316,7 +316,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
316 if (IS_ERR(data->usbmisc_data)) 316 if (IS_ERR(data->usbmisc_data))
317 return PTR_ERR(data->usbmisc_data); 317 return PTR_ERR(data->usbmisc_data);
318 318
319 if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) { 319 if ((of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC)
320 && data->usbmisc_data) {
320 pdata.flags |= CI_HDRC_IMX_IS_HSIC; 321 pdata.flags |= CI_HDRC_IMX_IS_HSIC;
321 data->usbmisc_data->hsic = 1; 322 data->usbmisc_data->hsic = 1;
322 data->pinctrl = devm_pinctrl_get(dev); 323 data->pinctrl = devm_pinctrl_get(dev);
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index dc7f7fd71684..c12ac56606c3 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -119,11 +119,6 @@ static const struct attribute_group ports_group = {
119 .attrs = ports_attrs, 119 .attrs = ports_attrs,
120}; 120};
121 121
122static const struct attribute_group *ports_groups[] = {
123 &ports_group,
124 NULL
125};
126
127/*************************************** 122/***************************************
128 * Adding & removing ports 123 * Adding & removing ports
129 ***************************************/ 124 ***************************************/
@@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
307static int usbport_trig_activate(struct led_classdev *led_cdev) 302static int usbport_trig_activate(struct led_classdev *led_cdev)
308{ 303{
309 struct usbport_trig_data *usbport_data; 304 struct usbport_trig_data *usbport_data;
305 int err;
310 306
311 usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL); 307 usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
312 if (!usbport_data) 308 if (!usbport_data)
@@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
315 311
316 /* List of ports */ 312 /* List of ports */
317 INIT_LIST_HEAD(&usbport_data->ports); 313 INIT_LIST_HEAD(&usbport_data->ports);
314 err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
315 if (err)
316 goto err_free;
318 usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports); 317 usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
319 usbport_trig_update_count(usbport_data); 318 usbport_trig_update_count(usbport_data);
320 319
@@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
322 usbport_data->nb.notifier_call = usbport_trig_notify; 321 usbport_data->nb.notifier_call = usbport_trig_notify;
323 led_set_trigger_data(led_cdev, usbport_data); 322 led_set_trigger_data(led_cdev, usbport_data);
324 usb_register_notify(&usbport_data->nb); 323 usb_register_notify(&usbport_data->nb);
325
326 return 0; 324 return 0;
325
326err_free:
327 kfree(usbport_data);
328 return err;
327} 329}
328 330
329static void usbport_trig_deactivate(struct led_classdev *led_cdev) 331static void usbport_trig_deactivate(struct led_classdev *led_cdev)
@@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev)
335 usbport_trig_remove_port(usbport_data, port); 337 usbport_trig_remove_port(usbport_data, port);
336 } 338 }
337 339
340 sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
341
338 usb_unregister_notify(&usbport_data->nb); 342 usb_unregister_notify(&usbport_data->nb);
339 343
340 kfree(usbport_data); 344 kfree(usbport_data);
@@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = {
344 .name = "usbport", 348 .name = "usbport",
345 .activate = usbport_trig_activate, 349 .activate = usbport_trig_activate,
346 .deactivate = usbport_trig_deactivate, 350 .deactivate = usbport_trig_deactivate,
347 .groups = ports_groups,
348}; 351};
349 352
350static int __init usbport_trig_init(void) 353static int __init usbport_trig_init(void)
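
The usbport trigger used to publish its attributes through the trigger's ->groups pointer; the change above creates the group by hand in activate() and tears it down in deactivate(), which lets a sysfs failure unwind the half-built trigger data instead of being ignored. A condensed sketch of that error path (my_group and struct my_data are hypothetical):

#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

struct my_data { int placeholder; };
static const struct attribute_group my_group;	/* .attrs elided */

static int my_activate(struct led_classdev *led_cdev)
{
	struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
	int err;

	if (!data)
		return -ENOMEM;
	err = sysfs_create_group(&led_cdev->dev->kobj, &my_group);
	if (err)
		goto err_free;
	led_set_trigger_data(led_cdev, data);
	return 0;

err_free:
	kfree(data);
	return err;
}
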
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 68ad75a7460d..55ef3cc2701b 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -261,7 +261,7 @@ static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
261 261
262 if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) { 262 if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
263 dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__); 263 dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
264 dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT); 264 dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
265 dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG); 265 dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
266 } 266 }
267} 267}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 07bd31bb2f8a..bed2ff42780b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -177,6 +177,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
177 req->started = false; 177 req->started = false;
178 list_del(&req->list); 178 list_del(&req->list);
179 req->remaining = 0; 179 req->remaining = 0;
180 req->needs_extra_trb = false;
180 181
181 if (req->request.status == -EINPROGRESS) 182 if (req->request.status == -EINPROGRESS)
182 req->request.status = status; 183 req->request.status = status;
@@ -1984,6 +1985,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
1984 1985
1985 /* begin to receive SETUP packets */ 1986 /* begin to receive SETUP packets */
1986 dwc->ep0state = EP0_SETUP_PHASE; 1987 dwc->ep0state = EP0_SETUP_PHASE;
1988 dwc->link_state = DWC3_LINK_STATE_SS_DIS;
1987 dwc3_ep0_out_start(dwc); 1989 dwc3_ep0_out_start(dwc);
1988 1990
1989 dwc3_gadget_enable_irq(dwc); 1991 dwc3_gadget_enable_irq(dwc);
@@ -3379,6 +3381,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
3379 dwc3_disconnect_gadget(dwc); 3381 dwc3_disconnect_gadget(dwc);
3380 __dwc3_gadget_stop(dwc); 3382 __dwc3_gadget_stop(dwc);
3381 3383
3384 synchronize_irq(dwc->irq_gadget);
3385
3382 return 0; 3386 return 0;
3383} 3387}
3384 3388
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 9cdef108fb1b..ed68a4860b7d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func(
838 838
839 ss = kzalloc(sizeof(*ss), GFP_KERNEL); 839 ss = kzalloc(sizeof(*ss), GFP_KERNEL);
840 if (!ss) 840 if (!ss)
841 return NULL; 841 return ERR_PTR(-ENOMEM);
842 842
843 ss_opts = container_of(fi, struct f_ss_opts, func_inst); 843 ss_opts = container_of(fi, struct f_ss_opts, func_inst);
844 844
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index f26109eafdbf..66ec1fdf9fe7 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
302MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>"); 302MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
303MODULE_ALIAS("mv-ehci"); 303MODULE_ALIAS("mv-ehci");
304MODULE_LICENSE("GPL"); 304MODULE_LICENSE("GPL");
305MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1ab2a6191013..77ef4c481f3c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
1783 int result; 1783 int result;
1784 u16 val; 1784 u16 val;
1785 1785
1786 result = usb_autopm_get_interface(serial->interface);
1787 if (result)
1788 return result;
1789
1786 val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value; 1790 val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
1787 result = usb_control_msg(serial->dev, 1791 result = usb_control_msg(serial->dev,
1788 usb_sndctrlpipe(serial->dev, 0), 1792 usb_sndctrlpipe(serial->dev, 0),
@@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
1795 val, result); 1799 val, result);
1796 } 1800 }
1797 1801
1802 usb_autopm_put_interface(serial->interface);
1803
1798 return result; 1804 return result;
1799} 1805}
1800 1806
@@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
1846 unsigned char *buf; 1852 unsigned char *buf;
1847 int result; 1853 int result;
1848 1854
1855 result = usb_autopm_get_interface(serial->interface);
1856 if (result)
1857 return result;
1858
1849 buf = kmalloc(1, GFP_KERNEL); 1859 buf = kmalloc(1, GFP_KERNEL);
1850 if (!buf) 1860 if (!buf) {
1861 usb_autopm_put_interface(serial->interface);
1851 return -ENOMEM; 1862 return -ENOMEM;
1863 }
1852 1864
1853 result = usb_control_msg(serial->dev, 1865 result = usb_control_msg(serial->dev,
1854 usb_rcvctrlpipe(serial->dev, 0), 1866 usb_rcvctrlpipe(serial->dev, 0),
@@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
1863 } 1875 }
1864 1876
1865 kfree(buf); 1877 kfree(buf);
1878 usb_autopm_put_interface(serial->interface);
1866 1879
1867 return result; 1880 return result;
1868} 1881}
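
The ftdi_sio hunks bracket both control transfers with runtime-PM calls: usb_autopm_get_interface() resumes a suspended device and takes a PM reference, and every exit path, including the allocation failure, must drop it again with usb_autopm_put_interface(). The pairing in isolation:

#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static int do_ctrl_io(struct usb_serial *serial)
{
	u8 *buf;
	int ret = usb_autopm_get_interface(serial->interface);

	if (ret)
		return ret;
	buf = kmalloc(1, GFP_KERNEL);
	if (!buf) {
		usb_autopm_put_interface(serial->interface);
		return -ENOMEM;
	}
	/* ... usb_control_msg(serial->dev, ...) ... */
	kfree(buf);
	usb_autopm_put_interface(serial->interface);
	return 0;
}
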
diff --git a/drivers/usb/serial/keyspan_usa26msg.h b/drivers/usb/serial/keyspan_usa26msg.h
index 09e21e84fc4e..a68f1fb25b8a 100644
--- a/drivers/usb/serial/keyspan_usa26msg.h
+++ b/drivers/usb/serial/keyspan_usa26msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa26msg.h 3 usa26msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa28msg.h b/drivers/usb/serial/keyspan_usa28msg.h
index dee454c4609a..a19f3fe5d98d 100644
--- a/drivers/usb/serial/keyspan_usa28msg.h
+++ b/drivers/usb/serial/keyspan_usa28msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa28msg.h 3 usa28msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa49msg.h b/drivers/usb/serial/keyspan_usa49msg.h
index 163b2dea2ec5..8c3970fdd868 100644
--- a/drivers/usb/serial/keyspan_usa49msg.h
+++ b/drivers/usb/serial/keyspan_usa49msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa49msg.h 3 usa49msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa67msg.h b/drivers/usb/serial/keyspan_usa67msg.h
index 20fa3e2f7187..dcf502fdbb44 100644
--- a/drivers/usb/serial/keyspan_usa67msg.h
+++ b/drivers/usb/serial/keyspan_usa67msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa67msg.h 3 usa67msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa90msg.h b/drivers/usb/serial/keyspan_usa90msg.h
index 86708ecd8735..c4ca0f631d20 100644
--- a/drivers/usb/serial/keyspan_usa90msg.h
+++ b/drivers/usb/serial/keyspan_usa90msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa90msg.h 3 usa90msg.h
3 4
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 98e7a5df0f6d..bb3f9aa4a909 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
46 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, 46 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
47 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, 47 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
48 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, 48 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
49 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
49 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, 50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 51 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
51 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), 52 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 4e2554d55362..559941ca884d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -8,6 +8,7 @@
8 8
9#define PL2303_VENDOR_ID 0x067b 9#define PL2303_VENDOR_ID 0x067b
10#define PL2303_PRODUCT_ID 0x2303 10#define PL2303_PRODUCT_ID 0x2303
11#define PL2303_PRODUCT_ID_TB 0x2304
11#define PL2303_PRODUCT_ID_RSAQ2 0x04bb 12#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
12#define PL2303_PRODUCT_ID_DCU11 0x1234 13#define PL2303_PRODUCT_ID_DCU11 0x1234
13#define PL2303_PRODUCT_ID_PHAROS 0xaaa0 14#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
@@ -20,6 +21,7 @@
20#define PL2303_PRODUCT_ID_MOTOROLA 0x0307 21#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
21#define PL2303_PRODUCT_ID_ZTEK 0xe1f1 22#define PL2303_PRODUCT_ID_ZTEK 0xe1f1
22 23
24
23#define ATEN_VENDOR_ID 0x0557 25#define ATEN_VENDOR_ID 0x0557
24#define ATEN_VENDOR_ID2 0x0547 26#define ATEN_VENDOR_ID2 0x0547
25#define ATEN_PRODUCT_ID 0x2008 27#define ATEN_PRODUCT_ID 0x2008
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4d0273508043..edbbb13d6de6 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
85/* Motorola Tetra driver */ 85/* Motorola Tetra driver */
86#define MOTOROLA_TETRA_IDS() \ 86#define MOTOROLA_TETRA_IDS() \
87 { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ 87 { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
88 { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ 88 { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
89 { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
89DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); 90DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
90 91
91/* Novatel Wireless GPS driver */ 92/* Novatel Wireless GPS driver */
diff --git a/drivers/usb/usbip/README b/drivers/usb/usbip/README
deleted file mode 100644
index 41a2cf2e77a6..000000000000
--- a/drivers/usb/usbip/README
+++ /dev/null
@@ -1,7 +0,0 @@
1TODO:
2 - more discussion about the protocol
3 - testing
4 - review of the userspace interface
5 - document the protocol
6
7Please send patches for this code to Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h
index 4d13e510590e..b2aa986ab9ed 100644
--- a/drivers/vfio/pci/trace.h
+++ b/drivers/vfio/pci/trace.h
@@ -1,13 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */ 1/* SPDX-License-Identifier: GPL-2.0-only */
2/* 2/*
3 * VFIO PCI mmap/mmap_fault tracepoints 3 * VFIO PCI mmap/mmap_fault tracepoints
4 * 4 *
5 * Copyright (C) 2018 IBM Corp. All rights reserved. 5 * Copyright (C) 2018 IBM Corp. All rights reserved.
6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru> 6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 7 */
12 8
13#undef TRACE_SYSTEM 9#undef TRACE_SYSTEM
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index 054a2cf9dd8e..32f695ffe128 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -1,14 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2. 3 * VFIO PCI NVIDIA Whitherspoon GPU support a.k.a. NVLink2.
4 * 4 *
5 * Copyright (C) 2018 IBM Corp. All rights reserved. 5 * Copyright (C) 2018 IBM Corp. All rights reserved.
6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru> 6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Register an on-GPU RAM region for cacheable access. 8 * Register an on-GPU RAM region for cacheable access.
13 * 9 *
14 * Derived from original vfio_pci_igd.c: 10 * Derived from original vfio_pci_igd.c:
@@ -178,11 +174,11 @@ static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
178 struct vfio_pci_region *region, struct vfio_info_cap *caps) 174 struct vfio_pci_region *region, struct vfio_info_cap *caps)
179{ 175{
180 struct vfio_pci_nvgpu_data *data = region->data; 176 struct vfio_pci_nvgpu_data *data = region->data;
181 struct vfio_region_info_cap_nvlink2_ssatgt cap = { 0 }; 177 struct vfio_region_info_cap_nvlink2_ssatgt cap = {
182 178 .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
183 cap.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT; 179 .header.version = 1,
184 cap.header.version = 1; 180 .tgt = data->gpu_tgt
185 cap.tgt = data->gpu_tgt; 181 };
186 182
187 return vfio_info_add_capability(caps, &cap.header, sizeof(cap)); 183 return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
188} 184}
@@ -365,18 +361,18 @@ static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
365 struct vfio_pci_region *region, struct vfio_info_cap *caps) 361 struct vfio_pci_region *region, struct vfio_info_cap *caps)
366{ 362{
367 struct vfio_pci_npu2_data *data = region->data; 363 struct vfio_pci_npu2_data *data = region->data;
368 struct vfio_region_info_cap_nvlink2_ssatgt captgt = { 0 }; 364 struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
369 struct vfio_region_info_cap_nvlink2_lnkspd capspd = { 0 }; 365 .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
366 .header.version = 1,
367 .tgt = data->gpu_tgt
368 };
369 struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
370 .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
371 .header.version = 1,
372 .link_speed = data->link_speed
373 };
370 int ret; 374 int ret;
371 375
372 captgt.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT;
373 captgt.header.version = 1;
374 captgt.tgt = data->gpu_tgt;
375
376 capspd.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD;
377 capspd.header.version = 1;
378 capspd.link_speed = data->link_speed;
379
380 ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt)); 376 ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
381 if (ret) 377 if (ret)
382 return ret; 378 return ret;
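
The vfio_pci_nvlink2 cleanups replace "zero-initialize, then assign" with C99 designated initializers, which name the interesting members and implicitly zero everything else, so the separate "= { 0 }" plus assignment lines collapse into one declaration. In isolation, with hypothetical types and values:

#include <linux/types.h>

struct cap_hdr { u16 id; u16 version; };
struct cap { struct cap_hdr header; u64 tgt; };

static const struct cap example = {
	.header.id      = 5,		/* hypothetical values */
	.header.version = 1,
	.tgt            = 0xdeadbeef,	/* unnamed members stay zero */
};
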
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index bca86bf7189f..df51a35cf537 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1337,7 +1337,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
1337 n->vqs[i].rx_ring = NULL; 1337 n->vqs[i].rx_ring = NULL;
1338 vhost_net_buf_init(&n->vqs[i].rxq); 1338 vhost_net_buf_init(&n->vqs[i].rxq);
1339 } 1339 }
1340 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); 1340 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
1341 UIO_MAXIOV + VHOST_NET_BATCH);
1341 1342
1342 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); 1343 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
1343 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); 1344 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 344684f3e2e4..23593cb23dd0 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1627,7 +1627,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		vqs[i] = &vs->vqs[i].vq;
 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 	}
-	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
 
 	vhost_scsi_init_inflight(vs, NULL);
 
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 15a216cdd507..24a129fcdd61 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 		vq->indirect = kmalloc_array(UIO_MAXIOV,
 					     sizeof(*vq->indirect),
 					     GFP_KERNEL);
-		vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
+		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
 					GFP_KERNEL);
-		vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
+		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
 					GFP_KERNEL);
 		if (!vq->indirect || !vq->log || !vq->heads)
 			goto err_nomem;
@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 }
 
 void vhost_dev_init(struct vhost_dev *dev,
-		    struct vhost_virtqueue **vqs, int nvqs)
+		    struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
 {
 	struct vhost_virtqueue *vq;
 	int i;
@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
 	dev->iotlb = NULL;
 	dev->mm = NULL;
 	dev->worker = NULL;
+	dev->iov_limit = iov_limit;
 	init_llist_head(&dev->work_list);
 	init_waitqueue_head(&dev->wait);
 	INIT_LIST_HEAD(&dev->read_list);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 1b675dad5e05..9490e7ddb340 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -170,9 +170,11 @@ struct vhost_dev {
 	struct list_head read_list;
 	struct list_head pending_list;
 	wait_queue_head_t wait;
+	int iov_limit;
 };
 
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+		    int nvqs, int iov_limit);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 3fbc068eaa9b..bb5fc0e9fbc2 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
 	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
 
-	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
 
 	file->private_data = vsock;
 	spin_lock_init(&vsock->send_pkt_list_lock);
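
Editor's note: the vhost hunks above thread a per-device iov_limit through vhost_dev_init(), letting vhost-net reserve UIO_MAXIOV + VHOST_NET_BATCH entries for its batched path while scsi and vsock stay at UIO_MAXIOV, and vq->log/vq->heads are then sized from dev->iov_limit rather than the global maximum. A compilable userspace model of that sizing pattern (all names are invented for illustration; cleanup is omitted for brevity):

#include <stdio.h>
#include <stdlib.h>

#define UIO_MAXIOV	1024
#define NET_BATCH	64		/* stand-in for VHOST_NET_BATCH */

struct virtqueue {
	unsigned long *log;		/* sized from the device's iov limit */
	int *heads;
};

struct device {
	struct virtqueue *vqs;
	int nvqs;
	int iov_limit;
};

/* Model of vhost_dev_init() sizing per-queue arrays to dev->iov_limit. */
static int dev_init(struct device *dev, struct virtqueue *vqs, int nvqs,
		    int iov_limit)
{
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	dev->iov_limit = iov_limit;
	for (i = 0; i < nvqs; i++) {
		vqs[i].log = calloc(iov_limit, sizeof(*vqs[i].log));
		vqs[i].heads = calloc(iov_limit, sizeof(*vqs[i].heads));
		if (!vqs[i].log || !vqs[i].heads)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct virtqueue vqs[2];
	struct device net_dev;

	/* net reserves headroom for batched descriptors... */
	if (dev_init(&net_dev, vqs, 2, UIO_MAXIOV + NET_BATCH))
		return 1;
	printf("net iov_limit=%d\n", net_dev.iov_limit);
	/* ...while the scsi/vsock equivalents would pass plain UIO_MAXIOV. */
	return 0;
}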
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 09731b2f6815..c6b3bdbbdbc9 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
 
 static void vgacon_restore_screen(struct vc_data *c)
 {
+	c->vc_origin = c->vc_visible_origin;
 	vgacon_scrollback_cur->save = 0;
 
 	if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
@@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
 	int start, end, count, soff;
 
 	if (!lines) {
-		c->vc_visible_origin = c->vc_origin;
-		vga_set_mem_top(c);
+		vgacon_restore_screen(c);
 		return;
 	}
 
@@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
 	if (!vgacon_scrollback_cur->save) {
 		vgacon_cursor(c, CM_ERASE);
 		vgacon_save_screen(c);
+		c->vc_origin = (unsigned long)c->vc_screenbuf;
 		vgacon_scrollback_cur->save = 1;
 	}
 
@@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
 		int copysize;
 
 		int diff = c->vc_rows - count;
-		void *d = (void *) c->vc_origin;
+		void *d = (void *) c->vc_visible_origin;
 		void *s = (void *) c->vc_screenbuf;
 
 		count *= c->vc_size_row;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cd7e755484e3..a0b07c331255 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -152,7 +152,12 @@ struct vring_virtqueue {
 	/* Available for packed ring */
 	struct {
 		/* Actual memory layout for this queue. */
-		struct vring_packed vring;
+		struct {
+			unsigned int num;
+			struct vring_packed_desc *desc;
+			struct vring_packed_desc_event *driver;
+			struct vring_packed_desc_event *device;
+		} vring;
 
 		/* Driver ring wrap counter. */
 		bool avail_wrap_counter;
@@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
 		!context;
 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
+	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+		vq->weak_barriers = false;
+
 	vq->packed.ring_dma_addr = ring_dma_addr;
 	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
 	vq->packed.device_event_dma_addr = device_event_dma_addr;
@@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
 		!context;
 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
+	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+		vq->weak_barriers = false;
+
 	vq->split.queue_dma_addr = 0;
 	vq->split.queue_size_in_bytes = 0;
 
@@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev)
 		break;
 	case VIRTIO_F_RING_PACKED:
 		break;
+	case VIRTIO_F_ORDER_PLATFORM:
+		break;
 	default:
 		/* We don't understand this bit. */
 		__virtio_clear_bit(vdev, i);
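
Editor's note: both virtqueue constructors above downgrade to strong barriers when VIRTIO_F_ORDER_PLATFORM is negotiated, and the transport-feature switch learns the new bit so it is not cleared as unknown. A small sketch of gating barrier strength on a negotiated feature bit (the feature-bit numbers and helper are assumptions for illustration, not the virtio definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_EVENT_IDX		29	/* illustrative bit numbers */
#define F_ORDER_PLATFORM	36

static bool has_feature(uint64_t features, unsigned int bit)
{
	return features & (1ULL << bit);
}

struct vq {
	bool event;
	bool weak_barriers;	/* true: SMP-only barriers suffice */
};

static void vq_init(struct vq *vq, uint64_t features)
{
	vq->event = has_feature(features, F_EVENT_IDX);
	/*
	 * If the platform may reorder accesses between CPU and device
	 * (non-coherent interconnect, IOMMU, etc.), SMP-only barriers
	 * are not enough, so fall back to the stronger kind.
	 */
	vq->weak_barriers = !has_feature(features, F_ORDER_PLATFORM);
}

int main(void)
{
	struct vq vq;

	vq_init(&vq, 1ULL << F_ORDER_PLATFORM);
	printf("weak_barriers=%d\n", (int)vq.weak_barriers);
	return 0;
}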
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 989cf872b98c..bb7888429be6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -645,7 +645,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		       void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		       unsigned long attrs)
 {
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#ifdef CONFIG_ARM
 	if (xen_get_dma_ops(dev)->mmap)
 		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
 						  dma_addr, size, attrs);
@@ -662,7 +662,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 			void *cpu_addr, dma_addr_t handle, size_t size,
 			unsigned long attrs)
 {
-#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#ifdef CONFIG_ARM
 	if (xen_get_dma_ops(dev)->get_sgtable) {
 #if 0
 	/*
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index d441244b79df..28d9c2b1b3bb 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb,
 	pkt.len = dentry->d_name.len;
 	memcpy(pkt.name, dentry->d_name.name, pkt.len);
 	pkt.name[pkt.len] = '\0';
-	dput(dentry);
 
 	if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
 		ret = -EFAULT;
@@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb,
 	complete_all(&ino->expire_complete);
 	spin_unlock(&sbi->fs_lock);
 
+	dput(dentry);
+
 	return ret;
 }
 
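Editor's note: the expire fix above moves dput() from before the copy_to_user() path to after the last use of the dentry, since dropping the final reference may free the object whose name was just copied. A minimal userspace model of that ordering rule (types invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct object {
	int refcount;
	char name[16];
};

static void put_object(struct object *o)
{
	if (--o->refcount == 0)
		free(o);	/* the last put may free the object */
}

int main(void)
{
	struct object *o = calloc(1, sizeof(*o));
	char copy[16];

	o->refcount = 1;
	strcpy(o->name, "dentry");

	/* Wrong order would be: put_object(o); strcpy(copy, o->name); */
	strcpy(copy, o->name);	/* finish every use first... */
	put_object(o);		/* ...then drop the reference */

	printf("%s\n", copy);
	return 0;
}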
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 0e8ea2d9a2bb..078992eee299 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -266,8 +266,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
 	}
 	root_inode = autofs_get_inode(s, S_IFDIR | 0755);
 	root = d_make_root(root_inode);
-	if (!root)
+	if (!root) {
+		ret = -ENOMEM;
 		goto fail_ino;
+	}
 	pipe = NULL;
 
 	root->d_fsdata = ino;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index f64aad613727..5a6c39b44c84 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -968,6 +968,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
+static struct extent_buffer *alloc_tree_block_no_bg_flush(
+					  struct btrfs_trans_handle *trans,
+					  struct btrfs_root *root,
+					  u64 parent_start,
+					  const struct btrfs_disk_key *disk_key,
+					  int level,
+					  u64 hint,
+					  u64 empty_size)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct extent_buffer *ret;
+
+	/*
+	 * If we are COWing a node/leaf from the extent, chunk, device or free
+	 * space trees, make sure that we do not finish block group creation of
+	 * pending block groups. We do this to avoid a deadlock.
+	 * COWing can result in allocation of a new chunk, and flushing pending
+	 * block groups (btrfs_create_pending_block_groups()) can be triggered
+	 * when finishing allocation of a new chunk. Creation of a pending block
+	 * group modifies the extent, chunk, device and free space trees,
+	 * therefore we could deadlock with ourselves since we are holding a
+	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
+	 * try to COW later.
+	 * For similar reasons, we also need to delay flushing pending block
+	 * groups when splitting a leaf or node, from one of those trees, since
+	 * we are holding a write lock on it and its parent or when inserting a
+	 * new root node for one of those trees.
+	 */
+	if (root == fs_info->extent_root ||
+	    root == fs_info->chunk_root ||
+	    root == fs_info->dev_root ||
+	    root == fs_info->free_space_root)
+		trans->can_flush_pending_bgs = false;
+
+	ret = btrfs_alloc_tree_block(trans, root, parent_start,
+				     root->root_key.objectid, disk_key, level,
+				     hint, empty_size);
+	trans->can_flush_pending_bgs = true;
+
+	return ret;
+}
+
 /*
  * does the dirty work in cow of a single block. The parent block (if
  * supplied) is updated to point to the new cow copy. The new buffer is marked
@@ -1015,28 +1057,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
 		parent_start = parent->start;
 
-	/*
-	 * If we are COWing a node/leaf from the extent, chunk, device or free
-	 * space trees, make sure that we do not finish block group creation of
-	 * pending block groups. We do this to avoid a deadlock.
-	 * COWing can result in allocation of a new chunk, and flushing pending
-	 * block groups (btrfs_create_pending_block_groups()) can be triggered
-	 * when finishing allocation of a new chunk. Creation of a pending block
-	 * group modifies the extent, chunk, device and free space trees,
-	 * therefore we could deadlock with ourselves since we are holding a
-	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
-	 * try to COW later.
-	 */
-	if (root == fs_info->extent_root ||
-	    root == fs_info->chunk_root ||
-	    root == fs_info->dev_root ||
-	    root == fs_info->free_space_root)
-		trans->can_flush_pending_bgs = false;
-
-	cow = btrfs_alloc_tree_block(trans, root, parent_start,
-			root->root_key.objectid, &disk_key, level,
-			search_start, empty_size);
-	trans->can_flush_pending_bgs = true;
+	cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
+					   level, search_start, empty_size);
 	if (IS_ERR(cow))
 		return PTR_ERR(cow);
 
@@ -3345,8 +3367,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	else
 		btrfs_node_key(lower, &lower_key, 0);
 
-	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-				   &lower_key, level, root->node->start, 0);
+	c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
+					 root->node->start, 0);
 	if (IS_ERR(c))
 		return PTR_ERR(c);
 
@@ -3475,8 +3497,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	mid = (c_nritems + 1) / 2;
 	btrfs_node_key(c, &disk_key, mid);
 
-	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-				       &disk_key, level, c->start, 0);
+	split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
+					     c->start, 0);
 	if (IS_ERR(split))
 		return PTR_ERR(split);
 
@@ -4260,8 +4282,8 @@ again:
 	else
 		btrfs_item_key(l, &disk_key, mid);
 
-	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
-				       &disk_key, 0, l->start, 0);
+	right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
+					     l->start, 0);
 	if (IS_ERR(right))
 		return PTR_ERR(right);
 
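Editor's note: the btrfs refactor above consolidates the clear/restore of trans->can_flush_pending_bgs around btrfs_alloc_tree_block() into one helper, so insert_new_root(), split_node() and split_leaf() get the same deadlock avoidance as __btrfs_cow_block(). A simplified sketch of the wrap-the-call-in-a-flag-scope pattern (not btrfs code; names invented):

#include <stdbool.h>
#include <stdio.h>

struct trans {
	bool can_flush_pending;
};

static int alloc_block(struct trans *t)
{
	/* Flushing here could re-enter trees the caller holds locked. */
	printf("alloc (flush %s)\n",
	       t->can_flush_pending ? "allowed" : "suppressed");
	return 0;
}

/* One helper owns clearing and restoring the flag around the allocation. */
static int alloc_block_no_flush(struct trans *t, bool critical_root)
{
	int ret;

	if (critical_root)
		t->can_flush_pending = false;
	ret = alloc_block(t);
	t->can_flush_pending = true;
	return ret;
}

int main(void)
{
	struct trans t = { .can_flush_pending = true };

	alloc_block_no_flush(&t, true);		/* e.g. extent/chunk tree */
	alloc_block_no_flush(&t, false);	/* any other tree */
	return 0;
}

Centralizing the pattern means a future call site cannot forget to restore the flag.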
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c5586ffd1426..0a3f122dd61f 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1621,6 +1621,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 					     flags | SB_RDONLY, device_name, data);
 		if (IS_ERR(mnt_root)) {
 			root = ERR_CAST(mnt_root);
+			kfree(subvol_name);
 			goto out;
 		}
 
@@ -1630,12 +1631,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 			if (error < 0) {
 				root = ERR_PTR(error);
 				mntput(mnt_root);
+				kfree(subvol_name);
 				goto out;
 			}
 		}
 	}
 	if (IS_ERR(mnt_root)) {
 		root = ERR_CAST(mnt_root);
+		kfree(subvol_name);
 		goto out;
 	}
 
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 127fa1535f58..4ec2b660d014 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -850,14 +850,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
 	btrfs_trans_release_chunk_metadata(trans);
 
-	if (lock && should_end_transaction(trans) &&
-	    READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
-		spin_lock(&info->trans_lock);
-		if (cur_trans->state == TRANS_STATE_RUNNING)
-			cur_trans->state = TRANS_STATE_BLOCKED;
-		spin_unlock(&info->trans_lock);
-	}
-
 	if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
 		if (throttle)
 			return btrfs_commit_transaction(trans);
@@ -1879,6 +1871,21 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
 }
 
+/*
+ * Release reserved delayed ref space of all pending block groups of the
+ * transaction and remove them from the list
+ */
+static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
+{
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+	struct btrfs_block_group_cache *block_group, *tmp;
+
+	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
+		btrfs_delayed_refs_rsv_release(fs_info, 1);
+		list_del_init(&block_group->bg_list);
+	}
+}
+
 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
 	/*
@@ -2270,6 +2277,7 @@ scrub_continue:
 	btrfs_scrub_continue(fs_info);
 cleanup_transaction:
 	btrfs_trans_release_metadata(trans);
+	btrfs_cleanup_pending_block_groups(trans);
 	btrfs_trans_release_chunk_metadata(trans);
 	trans->block_rsv = NULL;
 	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3e4f8f88353e..15561926ab32 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -957,11 +957,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
 	else
 		fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
 
-	fs_devices->fsid_change = fsid_change_in_progress;
-
 	if (IS_ERR(fs_devices))
 		return ERR_CAST(fs_devices);
 
+	fs_devices->fsid_change = fsid_change_in_progress;
+
 	mutex_lock(&fs_devices->device_list_mutex);
 	list_add(&fs_devices->fs_list, &fs_uuids);
 
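Editor's note: the volumes.c hunk moves the fs_devices->fsid_change store below the IS_ERR() check, because alloc_fs_devices() can return an encoded error pointer that must never be dereferenced. A userspace model of the ERR_PTR()/IS_ERR() convention (a simplified re-implementation for illustration, not the kernel macros):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
static void *err_ptr(long err) { return (void *)err; }
static int is_err(const void *p)
{
	/* Error codes live in the top 4095 values of the address space. */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct fs_devices { int fsid_change; };

static struct fs_devices *alloc_fs_devices(int fail)
{
	return fail ? err_ptr(-ENOMEM) : calloc(1, sizeof(struct fs_devices));
}

int main(void)
{
	struct fs_devices *fs = alloc_fs_devices(1);

	/* Writing fs->fsid_change before this check is the bug being fixed. */
	if (is_err(fs)) {
		printf("alloc failed: %ld\n", (long)fs);
		return 1;
	}
	fs->fsid_change = 1;	/* safe only after the IS_ERR() check */
	free(fs);
	return 0;
}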
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 94c026bba2c2..bba28a5034ba 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1035,6 +1035,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci)
 	list_del_init(&ci->i_snap_realm_item);
 	ci->i_snap_realm_counter++;
 	ci->i_snap_realm = NULL;
+	if (realm->ino == ci->i_vino.ino)
+		realm->inode = NULL;
 	spin_unlock(&realm->inodes_with_caps_lock);
 	ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
 			    realm);
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 03f4d24db8fe..9455d3aef0c3 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -3,19 +3,6 @@
  * quota.c - CephFS quota
  *
  * Copyright (C) 2017-2018 SUSE
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/statfs.h>
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 593fb422d0f3..e92a2fee3c57 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -252,6 +252,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
252 seq_printf(m, ",ACL"); 252 seq_printf(m, ",ACL");
253#endif 253#endif
254 seq_putc(m, '\n'); 254 seq_putc(m, '\n');
255 seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize);
255 seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); 256 seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
256 seq_printf(m, "Servers:"); 257 seq_printf(m, "Servers:");
257 258
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d1f9c2f3f575..7652551a1fc4 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.16"
+#define CIFS_VERSION   "2.17"
 #endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e18915415e13..bb54ccf8481c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1549,18 +1549,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server)
 }
 
 static int
-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
+		     bool malformed)
 {
 	int length;
-	struct cifs_readdata *rdata = mid->callback_data;
 
 	length = cifs_discard_remaining_data(server);
-	dequeue_mid(mid, rdata->result);
+	dequeue_mid(mid, malformed);
 	mid->resp_buf = server->smallbuf;
 	server->smallbuf = NULL;
 	return length;
 }
 
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+	struct cifs_readdata *rdata = mid->callback_data;
+
+	return __cifs_readv_discard(server, mid, rdata->result);
+}
+
 int
 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 {
@@ -1602,12 +1610,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 		return -1;
 	}
 
+	/* set up first two iov for signature check and to get credits */
+	rdata->iov[0].iov_base = buf;
+	rdata->iov[0].iov_len = 4;
+	rdata->iov[1].iov_base = buf + 4;
+	rdata->iov[1].iov_len = server->total_read - 4;
+	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
 	/* Was the SMB read successful? */
 	rdata->result = server->ops->map_error(buf, false);
 	if (rdata->result != 0) {
 		cifs_dbg(FYI, "%s: server returned error %d\n",
 			 __func__, rdata->result);
-		return cifs_readv_discard(server, mid);
+		/* normal error on read response */
+		return __cifs_readv_discard(server, mid, false);
 	}
 
 	/* Is there enough to get to the rest of the READ_RSP header? */
@@ -1651,14 +1670,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 		server->total_read += length;
 	}
 
-	/* set up first iov for signature check */
-	rdata->iov[0].iov_base = buf;
-	rdata->iov[0].iov_len = 4;
-	rdata->iov[1].iov_base = buf + 4;
-	rdata->iov[1].iov_len = server->total_read - 4;
-	cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n",
-		 rdata->iov[0].iov_base, server->total_read);
-
 	/* how much data is in the response? */
 #ifdef CONFIG_CIFS_SMB_DIRECT
 	use_rdma_mr = rdata->mr;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 683310f26171..8463c940e0e5 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -720,6 +720,21 @@ server_unresponsive(struct TCP_Server_Info *server)
 	return false;
 }
 
+static inline bool
+zero_credits(struct TCP_Server_Info *server)
+{
+	int val;
+
+	spin_lock(&server->req_lock);
+	val = server->credits + server->echo_credits + server->oplock_credits;
+	if (server->in_flight == 0 && val == 0) {
+		spin_unlock(&server->req_lock);
+		return true;
+	}
+	spin_unlock(&server->req_lock);
+	return false;
+}
+
 static int
 cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
 {
@@ -732,6 +747,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
 	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
 		try_to_freeze();
 
+		/* reconnect if no credits and no requests in flight */
+		if (zero_credits(server)) {
+			cifs_reconnect(server);
+			return -ECONNABORTED;
+		}
+
 		if (server_unresponsive(server))
 			return -ECONNABORTED;
 		if (cifs_rdma_enabled(server) && server->smbd_conn)
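
Editor's note: zero_credits() above snapshots all three credit counters plus in_flight under req_lock, so a wedged session (no credits left and nothing outstanding that could return credits) is detected consistently, and the read loop then forces a reconnect. A pthread-based model of that consistent-snapshot test (the structure here is illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct server {
	pthread_mutex_t lock;
	int credits, echo_credits, oplock_credits;
	int in_flight;
};

/* Read all counters under one lock so the combined test is consistent. */
static bool zero_credits(struct server *s)
{
	bool wedged;

	pthread_mutex_lock(&s->lock);
	wedged = s->in_flight == 0 &&
		 s->credits + s->echo_credits + s->oplock_credits == 0;
	pthread_mutex_unlock(&s->lock);
	return wedged;
}

int main(void)
{
	struct server s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	if (zero_credits(&s))
		printf("no credits and nothing in flight: reconnect\n");
	return 0;
}

Checking the counters individually, without the lock, could race with a credit grant and report a false wedge.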
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 2c7689f3998d..659ce1b92c44 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2696,6 +2696,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
 
 		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
 		if (rc) {
+			kvfree(wdata->pages);
 			kfree(wdata);
 			add_credits_and_wake_if(server, credits, 0);
 			break;
@@ -2707,6 +2708,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
 		if (rc) {
 			for (i = 0; i < nr_pages; i++)
 				put_page(wdata->pages[i]);
+			kvfree(wdata->pages);
 			kfree(wdata);
 			add_credits_and_wake_if(server, credits, 0);
 			break;
@@ -3386,8 +3388,12 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
 		}
 
 		rc = cifs_read_allocate_pages(rdata, npages);
-		if (rc)
-			goto error;
+		if (rc) {
+			kvfree(rdata->pages);
+			kfree(rdata);
+			add_credits_and_wake_if(server, credits, 0);
+			break;
+		}
 
 		rdata->tailsz = PAGE_SIZE;
 	}
@@ -3407,7 +3413,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
 		if (!rdata->cfile->invalidHandle ||
 		    !(rc = cifs_reopen_file(rdata->cfile, true)))
 			rc = server->ops->async_readv(rdata);
-error:
 		if (rc) {
 			add_credits_and_wake_if(server, rdata->credits, 0);
 			kref_put(&rdata->refcount,
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index f14533da3a93..01a76bccdb8d 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -293,6 +293,8 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
 	int rc;
 	struct smb2_file_all_info *smb2_data;
 	__u32 create_options = 0;
+	struct cifs_fid fid;
+	bool no_cached_open = tcon->nohandlecache;
 
 	*adjust_tz = false;
 	*symlink = false;
@@ -301,6 +303,21 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
 			    GFP_KERNEL);
 	if (smb2_data == NULL)
 		return -ENOMEM;
+
+	/* If it is a root and its handle is cached then use it */
+	if (!strlen(full_path) && !no_cached_open) {
+		rc = open_shroot(xid, tcon, &fid);
+		if (rc)
+			goto out;
+		rc = SMB2_query_info(xid, tcon, fid.persistent_fid,
+				     fid.volatile_fid, smb2_data);
+		close_shroot(&tcon->crfid);
+		if (rc)
+			goto out;
+		move_smb2_info_to_cifs(data, smb2_data);
+		goto out;
+	}
+
 	if (backup_cred(cifs_sb))
 		create_options |= CREATE_OPEN_BACKUP_INTENT;
 
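Editor's note: smb2_query_path_info() now short-circuits queries on the share root through the cached root handle (open_shroot()/close_shroot()) instead of a fresh open/query/close round trip on the wire. A sketch of such a cache-hit fast path (all helpers below are hypothetical stand-ins):

#include <stdio.h>
#include <string.h>

struct handle { int id; };

/* Hypothetical stand-ins for open_shroot()/close_shroot(). */
static int cached_root_open(struct handle *h) { h->id = 42; return 0; }
static void cached_root_put(void) { }

static int query_info(struct handle *h)
{
	printf("query via handle %d\n", h->id);
	return 0;
}

static int query_path_info(const char *path, int have_cache)
{
	struct handle h;
	int rc;

	/* Fast path: the share root ("") can reuse the cached handle. */
	if (!strlen(path) && have_cache) {
		rc = cached_root_open(&h);
		if (rc)
			return rc;
		rc = query_info(&h);
		cached_root_put();
		return rc;
	}
	/* Slow path would do a full open/query/close round trip. */
	printf("full open/query/close for '%s'\n", path);
	return 0;
}

int main(void)
{
	query_path_info("", 1);
	query_path_info("dir/file", 1);
	return 0;
}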
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 6a9c47541c53..7b8b58fb4d3f 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
 	if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
 		return false;
 
+	if (rsp->sync_hdr.CreditRequest) {
+		spin_lock(&server->req_lock);
+		server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest);
+		spin_unlock(&server->req_lock);
+		wake_up(&server->request_q);
+	}
+
 	if (rsp->StructureSize !=
 	    smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
 		if (le16_to_cpu(rsp->StructureSize) == 44)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index cf7eb891804f..6f96e2292856 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -34,6 +34,7 @@
34#include "cifs_ioctl.h" 34#include "cifs_ioctl.h"
35#include "smbdirect.h" 35#include "smbdirect.h"
36 36
37/* Change credits for different ops and return the total number of credits */
37static int 38static int
38change_conf(struct TCP_Server_Info *server) 39change_conf(struct TCP_Server_Info *server)
39{ 40{
@@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server)
 	server->oplock_credits = server->echo_credits = 0;
 	switch (server->credits) {
 	case 0:
-		return -1;
+		return 0;
 	case 1:
 		server->echoes = false;
 		server->oplocks = false;
-		cifs_dbg(VFS, "disabling echoes and oplocks\n");
 		break;
 	case 2:
 		server->echoes = true;
 		server->oplocks = false;
 		server->echo_credits = 1;
-		cifs_dbg(FYI, "disabling oplocks\n");
 		break;
 	default:
 		server->echoes = true;
@@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server)
 		server->echo_credits = 1;
 	}
 	server->credits -= server->echo_credits + server->oplock_credits;
-	return 0;
+	return server->credits + server->echo_credits + server->oplock_credits;
 }
 
 static void
 smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
 		 const int optype)
 {
-	int *val, rc = 0;
+	int *val, rc = -1;
+
 	spin_lock(&server->req_lock);
 	val = server->ops->get_credits_field(server, optype);
 
@@ -101,8 +101,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
 	}
 	spin_unlock(&server->req_lock);
 	wake_up(&server->request_q);
-	if (rc)
-		cifs_reconnect(server);
+
+	if (server->tcpStatus == CifsNeedReconnect)
+		return;
+
+	switch (rc) {
+	case -1:
+		/* change_conf hasn't been executed */
+		break;
+	case 0:
+		cifs_dbg(VFS, "Possible client or server bug - zero credits\n");
+		break;
+	case 1:
+		cifs_dbg(VFS, "disabling echoes and oplocks\n");
+		break;
+	case 2:
+		cifs_dbg(FYI, "disabling oplocks\n");
+		break;
+	default:
+		cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
+	}
 }
 
 static void
@@ -136,7 +154,11 @@ smb2_get_credits(struct mid_q_entry *mid)
 {
 	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)mid->resp_buf;
 
-	return le16_to_cpu(shdr->CreditRequest);
+	if (mid->mid_state == MID_RESPONSE_RECEIVED
+	    || mid->mid_state == MID_RESPONSE_MALFORMED)
+		return le16_to_cpu(shdr->CreditRequest);
+
+	return 0;
 }
 
 static int
@@ -165,14 +187,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
 
 		scredits = server->credits;
 		/* can deadlock with reopen */
-		if (scredits == 1) {
+		if (scredits <= 8) {
 			*num = SMB2_MAX_BUFFER_SIZE;
 			*credits = 0;
 			break;
 		}
 
-		/* leave one credit for a possible reopen */
-		scredits--;
+		/* leave some credits for reopen and other ops */
+		scredits -= 8;
 		*num = min_t(unsigned int, size,
 			     scredits * SMB2_MAX_BUFFER_SIZE);
 
@@ -844,7 +866,9 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
 			FILE_READ_EA,
 			FILE_FULL_EA_INFORMATION,
 			SMB2_O_INFO_FILE,
-			SMB2_MAX_EA_BUF,
+			CIFSMaxBufSize -
+			MAX_SMB2_CREATE_RESPONSE_SIZE -
+			MAX_SMB2_CLOSE_RESPONSE_SIZE,
 			&rsp_iov, &buftype, cifs_sb);
 	if (rc) {
 		/*
@@ -3189,11 +3213,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
 	    server->ops->is_status_pending(buf, server, 0))
 		return -1;
 
-	rdata->result = server->ops->map_error(buf, false);
+	/* set up first two iov to get credits */
+	rdata->iov[0].iov_base = buf;
+	rdata->iov[0].iov_len = 4;
+	rdata->iov[1].iov_base = buf + 4;
+	rdata->iov[1].iov_len =
+		min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4;
+	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
+		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
+
+	rdata->result = server->ops->map_error(buf, true);
 	if (rdata->result != 0) {
 		cifs_dbg(FYI, "%s: server returned error %d\n",
 			 __func__, rdata->result);
-		dequeue_mid(mid, rdata->result);
+		/* normal error on read response */
+		dequeue_mid(mid, false);
 		return 0;
 	}
 
@@ -3266,14 +3302,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
 		return 0;
 	}
 
-	/* set up first iov for signature check */
-	rdata->iov[0].iov_base = buf;
-	rdata->iov[0].iov_len = 4;
-	rdata->iov[1].iov_base = buf + 4;
-	rdata->iov[1].iov_len = server->vals->read_rsp_size - 4;
-	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
-		 rdata->iov[0].iov_base, server->vals->read_rsp_size);
-
 	length = rdata->copy_into_pages(server, rdata, &iter);
 
 	kfree(bvec);
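
Editor's note: across these smb2ops hunks, change_conf() now reports the post-repartition credit total (0 meaning none at all) instead of an error code, smb2_add_credits() turns that into diagnostics, credits are only harvested from sane mid states, and the MTU path keeps 8 credits in reserve rather than 1. A compilable model of the repartitioning logic (simplified; the real function also consults the oplock configuration):

#include <stdbool.h>
#include <stdio.h>

struct server {
	int credits, echo_credits, oplock_credits;
	bool echoes, oplocks;
};

/* Repartition credits and return the total (0 means "none at all"). */
static int change_conf(struct server *s)
{
	s->oplock_credits = s->echo_credits = 0;
	switch (s->credits) {
	case 0:
		return 0;
	case 1:
		s->echoes = false;
		s->oplocks = false;
		break;
	case 2:
		s->echoes = true;
		s->oplocks = false;
		s->echo_credits = 1;
		break;
	default:
		s->echoes = true;
		s->oplocks = true;
		s->echo_credits = 1;
		s->oplock_credits = 1;
	}
	s->credits -= s->echo_credits + s->oplock_credits;
	return s->credits + s->echo_credits + s->oplock_credits;
}

int main(void)
{
	struct server s = { .credits = 2 };
	int total = change_conf(&s);

	if (total == 0)
		printf("possible bug - zero credits\n");
	else
		printf("total=%d (echo=%d oplock=%d)\n",
		       total, s.echo_credits, s.oplock_credits);
	return 0;
}

Returning the total lets the caller distinguish "zero credits" (a likely bug worth logging) from a perfectly normal small grant.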
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 50811a7dc0e0..77b3aaa39b35 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2816,6 +2816,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
 	int resp_buftype = CIFS_NO_BUFFER;
 	struct cifs_ses *ses = tcon->ses;
 	int flags = 0;
+	bool allocated = false;
 
 	cifs_dbg(FYI, "Query Info\n");
 
@@ -2855,14 +2856,21 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
2855 "Error %d allocating memory for acl\n", 2856 "Error %d allocating memory for acl\n",
2856 rc); 2857 rc);
2857 *dlen = 0; 2858 *dlen = 0;
2859 rc = -ENOMEM;
2858 goto qinf_exit; 2860 goto qinf_exit;
2859 } 2861 }
2862 allocated = true;
2860 } 2863 }
2861 } 2864 }
2862 2865
2863 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset), 2866 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
2864 le32_to_cpu(rsp->OutputBufferLength), 2867 le32_to_cpu(rsp->OutputBufferLength),
2865 &rsp_iov, min_len, *data); 2868 &rsp_iov, min_len, *data);
2869 if (rc && allocated) {
2870 kfree(*data);
2871 *data = NULL;
2872 *dlen = 0;
2873 }
2866 2874
2867qinf_exit: 2875qinf_exit:
2868 SMB2_query_info_free(&rqst); 2876 SMB2_query_info_free(&rqst);
@@ -2916,9 +2924,10 @@ smb2_echo_callback(struct mid_q_entry *mid)
 {
 	struct TCP_Server_Info *server = mid->callback_data;
 	struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
-	unsigned int credits_received = 1;
+	unsigned int credits_received = 0;
 
-	if (mid->mid_state == MID_RESPONSE_RECEIVED)
+	if (mid->mid_state == MID_RESPONSE_RECEIVED
+	    || mid->mid_state == MID_RESPONSE_MALFORMED)
 		credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
 
 	DeleteMidQEntry(mid);
@@ -3175,7 +3184,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
 	struct TCP_Server_Info *server = tcon->ses->server;
 	struct smb2_sync_hdr *shdr =
 		(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
-	unsigned int credits_received = 1;
+	unsigned int credits_received = 0;
 	struct smb_rqst rqst = { .rq_iov = rdata->iov,
 				 .rq_nvec = 2,
 				 .rq_pages = rdata->pages,
@@ -3214,6 +3223,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
 		task_io_account_read(rdata->got_bytes);
 		cifs_stats_bytes_read(tcon, rdata->got_bytes);
 		break;
+	case MID_RESPONSE_MALFORMED:
+		credits_received = le16_to_cpu(shdr->CreditRequest);
+		/* fall through */
 	default:
 		if (rdata->result != -ENODATA)
 			rdata->result = -EIO;
@@ -3229,8 +3241,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
 		rdata->mr = NULL;
 	}
 #endif
-	if (rdata->result)
+	if (rdata->result && rdata->result != -ENODATA) {
 		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
+		trace_smb3_read_err(0 /* xid */,
+				    rdata->cfile->fid.persistent_fid,
+				    tcon->tid, tcon->ses->Suid, rdata->offset,
+				    rdata->bytes, rdata->result);
+	} else
+		trace_smb3_read_done(0 /* xid */,
+				     rdata->cfile->fid.persistent_fid,
+				     tcon->tid, tcon->ses->Suid,
+				     rdata->offset, rdata->got_bytes);
 
 	queue_work(cifsiod_wq, &rdata->work);
 	DeleteMidQEntry(mid);
@@ -3305,13 +3326,11 @@ smb2_async_readv(struct cifs_readdata *rdata)
 	if (rc) {
 		kref_put(&rdata->refcount, cifs_readdata_release);
 		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
-		trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid,
-				    io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-				    io_parms.offset, io_parms.length);
-	} else
-		trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid,
-				     io_parms.tcon->tid, io_parms.tcon->ses->Suid,
-				     io_parms.offset, io_parms.length);
+		trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
+				    io_parms.tcon->tid,
+				    io_parms.tcon->ses->Suid,
+				    io_parms.offset, io_parms.length, rc);
+	}
 
 	cifs_small_buf_release(buf);
 	return rc;
@@ -3355,10 +3374,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
 		if (rc != -ENODATA) {
 			cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
 			cifs_dbg(VFS, "Send error in read = %d\n", rc);
+			trace_smb3_read_err(xid, req->PersistentFileId,
+					    io_parms->tcon->tid, ses->Suid,
+					    io_parms->offset, io_parms->length,
+					    rc);
 		}
-		trace_smb3_read_err(rc, xid, req->PersistentFileId,
-				    io_parms->tcon->tid, ses->Suid,
-				    io_parms->offset, io_parms->length);
 		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 		return rc == -ENODATA ? 0 : rc;
 	} else
@@ -3399,7 +3419,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
 	struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
 	unsigned int written;
 	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
-	unsigned int credits_received = 1;
+	unsigned int credits_received = 0;
 
 	switch (mid->mid_state) {
 	case MID_RESPONSE_RECEIVED:
@@ -3427,6 +3447,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
 	case MID_RETRY_NEEDED:
 		wdata->result = -EAGAIN;
 		break;
+	case MID_RESPONSE_MALFORMED:
+		credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+		/* fall through */
 	default:
 		wdata->result = -EIO;
 		break;
@@ -3444,8 +3467,17 @@ smb2_writev_callback(struct mid_q_entry *mid)
 		wdata->mr = NULL;
 	}
 #endif
-	if (wdata->result)
+	if (wdata->result) {
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+		trace_smb3_write_err(0 /* no xid */,
+				     wdata->cfile->fid.persistent_fid,
+				     tcon->tid, tcon->ses->Suid, wdata->offset,
+				     wdata->bytes, wdata->result);
+	} else
+		trace_smb3_write_done(0 /* no xid */,
+				      wdata->cfile->fid.persistent_fid,
+				      tcon->tid, tcon->ses->Suid,
+				      wdata->offset, wdata->bytes);
 
 	queue_work(cifsiod_wq, &wdata->work);
 	DeleteMidQEntry(mid);
@@ -3587,10 +3619,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
 			     wdata->bytes, rc);
 		kref_put(&wdata->refcount, release);
 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
-	} else
-		trace_smb3_write_done(0 /* no xid */, req->PersistentFileId,
-				      tcon->tid, tcon->ses->Suid, wdata->offset,
-				      wdata->bytes);
+	}
 
 async_writev_out:
 	cifs_small_buf_release(req);
@@ -3816,8 +3845,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
 	    rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
 			srch_inf->endOfSearch = true;
 			rc = 0;
-		}
-		cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+		} else
+			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
 		goto qdir_exit;
 	}
 
@@ -4412,8 +4441,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
 	rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
 	cifs_small_buf_release(req);
 
-	please_key_low = (__u64 *)req->LeaseKey;
-	please_key_high = (__u64 *)(req->LeaseKey+8);
+	please_key_low = (__u64 *)lease_key;
+	please_key_high = (__u64 *)(lease_key+8);
 	if (rc) {
 		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
 		trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
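
Editor's note: the SMB2_lease_break hunk reads the trace key from the caller's lease_key instead of req->LeaseKey, because req was already freed by cifs_small_buf_release() two lines earlier. A tiny model of taking values from the still-valid source rather than a freed buffer:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request { uint8_t lease_key[16]; };

int main(void)
{
	uint8_t lease_key[16] = { 1, 2, 3, 4 };
	struct request *req = malloc(sizeof(*req));
	uint64_t low;

	memcpy(req->lease_key, lease_key, sizeof(lease_key));
	free(req);		/* models cifs_small_buf_release(req) */

	/* Wrong: memcpy(&low, req->lease_key, 8) would read freed memory. */
	memcpy(&low, lease_key, sizeof(low));	/* caller's copy stays valid */
	printf("key low=%#llx\n", (unsigned long long)low);
	return 0;
}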
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 7a2d0a2255e6..538e2299805f 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,9 @@
 
 #define NUMBER_OF_SMB2_COMMANDS	0x0013
 
-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
-#define MAX_SMB2_HDR_SIZE 0x00b0
+/* 52 transform hdr + 64 hdr + 88 create rsp */
+#define SMB2_TRANSFORM_HEADER_SIZE 52
+#define MAX_SMB2_HDR_SIZE 204
 
 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
 #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
@@ -648,6 +649,13 @@ struct smb2_create_req {
 	__u8   Buffer[0];
 } __packed;
 
+/*
+ * Maximum size of a SMB2_CREATE response is 64 (smb2 header) +
+ * 88 (fixed part of create response) + 520 (path) + 150 (contexts) +
+ * 2 bytes of padding.
+ */
+#define MAX_SMB2_CREATE_RESPONSE_SIZE 824
+
 struct smb2_create_rsp {
 	struct smb2_sync_hdr sync_hdr;
 	__le16 StructureSize;	/* Must be 89 */
@@ -996,6 +1004,11 @@ struct smb2_close_req {
 	__u64  VolatileFileId; /* opaque endianness */
 } __packed;
 
+/*
+ * Maximum size of a SMB2_CLOSE response is 64 (smb2 header) + 60 (data)
+ */
+#define MAX_SMB2_CLOSE_RESPONSE_SIZE 124
+
 struct smb2_close_rsp {
 	struct smb2_sync_hdr sync_hdr;
 	__le16 StructureSize; /* 60 */
@@ -1398,8 +1411,6 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
 	char   FileName[0];	/* Name to be assigned to new link */
 } __packed; /* level 11 Set */
 
-#define SMB2_MAX_EA_BUF 65536
-
 struct smb2_file_full_ea_info { /* encoding of response for level 15 */
 	__le32 next_entry_offset;
 	__u8   flags;
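
Editor's note: the new macros replace the fixed 64 KiB EA buffer with a budget derived from CIFSMaxBufSize minus the worst-case create and close responses. The constants check out by arithmetic: 64 + 88 + 520 + 150 + 2 = 824 and 64 + 60 = 124. A compile-time restatement of that arithmetic:

#include <assert.h>

/* Values from the hunks above. */
#define MAX_SMB2_CREATE_RESPONSE_SIZE 824
#define MAX_SMB2_CLOSE_RESPONSE_SIZE  124

/* 64 (smb2 header) + 88 (fixed create rsp) + 520 (path) + 150 (contexts) + 2 */
static_assert(64 + 88 + 520 + 150 + 2 == MAX_SMB2_CREATE_RESPONSE_SIZE,
	      "create response budget");
/* 64 (smb2 header) + 60 (close rsp body) */
static_assert(64 + 60 == MAX_SMB2_CLOSE_RESPONSE_SIZE,
	      "close response budget");

int main(void) { return 0; }

With a hypothetical CIFSMaxBufSize of 16384, the EA query in smb2_query_eas() would then be limited to 16384 - 824 - 124 = 15436 bytes.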
diff --git a/fs/cifs/trace.c b/fs/cifs/trace.c
index bd4a546feec1..465483787193 100644
--- a/fs/cifs/trace.c
+++ b/fs/cifs/trace.c
@@ -3,16 +3,6 @@
  * Copyright (C) 2018, Microsoft Corporation.
  *
  *   Author(s): Steve French <stfrench@microsoft.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
  */
 #define CREATE_TRACE_POINTS
 #include "trace.h"
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index fb049809555f..59be48206932 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -3,16 +3,6 @@
  * Copyright (C) 2018, Microsoft Corporation.
  *
  *   Author(s): Steve French <stfrench@microsoft.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU General Public License for more details.
  */
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM cifs
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 202e0e84efdd..53532bd3f50d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -786,17 +786,8 @@ static void
 cifs_compound_callback(struct mid_q_entry *mid)
 {
 	struct TCP_Server_Info *server = mid->server;
-	unsigned int optype = mid->optype;
-	unsigned int credits_received = 0;
 
-	if (mid->mid_state == MID_RESPONSE_RECEIVED) {
-		if (mid->resp_buf)
-			credits_received = server->ops->get_credits(mid);
-		else
-			cifs_dbg(FYI, "Bad state for cancelled MID\n");
-	}
-
-	add_credits(server, credits_received, optype);
+	add_credits(server, server->ops->get_credits(mid), mid->optype);
 }
 
 static void
diff --git a/fs/dcache.c b/fs/dcache.c
index 2593153471cf..aac41adf4743 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -119,6 +119,7 @@ struct dentry_stat_t dentry_stat = {
 
 static DEFINE_PER_CPU(long, nr_dentry);
 static DEFINE_PER_CPU(long, nr_dentry_unused);
+static DEFINE_PER_CPU(long, nr_dentry_negative);
 
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 
@@ -152,11 +153,22 @@ static long get_nr_dentry_unused(void)
 	return sum < 0 ? 0 : sum;
 }
 
+static long get_nr_dentry_negative(void)
+{
+	int i;
+	long sum = 0;
+
+	for_each_possible_cpu(i)
+		sum += per_cpu(nr_dentry_negative, i);
+	return sum < 0 ? 0 : sum;
+}
+
 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
 		   size_t *lenp, loff_t *ppos)
 {
 	dentry_stat.nr_dentry = get_nr_dentry();
 	dentry_stat.nr_unused = get_nr_dentry_unused();
+	dentry_stat.nr_negative = get_nr_dentry_negative();
 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
 #endif
@@ -317,6 +329,8 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
 	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 	WRITE_ONCE(dentry->d_flags, flags);
 	dentry->d_inode = NULL;
+	if (dentry->d_flags & DCACHE_LRU_LIST)
+		this_cpu_inc(nr_dentry_negative);
 }
 
 static void dentry_free(struct dentry *dentry)
@@ -371,6 +385,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
371 * The per-cpu "nr_dentry_unused" counters are updated with 385 * The per-cpu "nr_dentry_unused" counters are updated with
372 * the DCACHE_LRU_LIST bit. 386 * the DCACHE_LRU_LIST bit.
373 * 387 *
388 * The per-cpu "nr_dentry_negative" counters are only updated
389 * when deleted from or added to the per-superblock LRU list, not
390 * from/to the shrink list. That is to avoid an unneeded dec/inc
391 * pair when moving from LRU to shrink list in select_collect().
392 *
374 * These helper functions make sure we always follow the 393 * These helper functions make sure we always follow the
375 * rules. d_lock must be held by the caller. 394 * rules. d_lock must be held by the caller.
376 */ 395 */
@@ -380,6 +399,8 @@ static void d_lru_add(struct dentry *dentry)
380 D_FLAG_VERIFY(dentry, 0); 399 D_FLAG_VERIFY(dentry, 0);
381 dentry->d_flags |= DCACHE_LRU_LIST; 400 dentry->d_flags |= DCACHE_LRU_LIST;
382 this_cpu_inc(nr_dentry_unused); 401 this_cpu_inc(nr_dentry_unused);
402 if (d_is_negative(dentry))
403 this_cpu_inc(nr_dentry_negative);
383 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 404 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
384} 405}
385 406
@@ -388,6 +409,8 @@ static void d_lru_del(struct dentry *dentry)
388 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 409 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
389 dentry->d_flags &= ~DCACHE_LRU_LIST; 410 dentry->d_flags &= ~DCACHE_LRU_LIST;
390 this_cpu_dec(nr_dentry_unused); 411 this_cpu_dec(nr_dentry_unused);
412 if (d_is_negative(dentry))
413 this_cpu_dec(nr_dentry_negative);
391 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); 414 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
392} 415}
393 416
@@ -418,6 +441,8 @@ static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
418 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 441 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
419 dentry->d_flags &= ~DCACHE_LRU_LIST; 442 dentry->d_flags &= ~DCACHE_LRU_LIST;
420 this_cpu_dec(nr_dentry_unused); 443 this_cpu_dec(nr_dentry_unused);
444 if (d_is_negative(dentry))
445 this_cpu_dec(nr_dentry_negative);
421 list_lru_isolate(lru, &dentry->d_lru); 446 list_lru_isolate(lru, &dentry->d_lru);
422} 447}
423 448
@@ -426,6 +451,8 @@ static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
426{ 451{
427 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 452 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
428 dentry->d_flags |= DCACHE_SHRINK_LIST; 453 dentry->d_flags |= DCACHE_SHRINK_LIST;
454 if (d_is_negative(dentry))
455 this_cpu_dec(nr_dentry_negative);
429 list_lru_isolate_move(lru, &dentry->d_lru, list); 456 list_lru_isolate_move(lru, &dentry->d_lru, list);
430} 457}
431 458
@@ -1188,15 +1215,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1188 */ 1215 */
1189void shrink_dcache_sb(struct super_block *sb) 1216void shrink_dcache_sb(struct super_block *sb)
1190{ 1217{
1191 long freed;
1192
1193 do { 1218 do {
1194 LIST_HEAD(dispose); 1219 LIST_HEAD(dispose);
1195 1220
1196 freed = list_lru_walk(&sb->s_dentry_lru, 1221 list_lru_walk(&sb->s_dentry_lru,
1197 dentry_lru_isolate_shrink, &dispose, 1024); 1222 dentry_lru_isolate_shrink, &dispose, 1024);
1198
1199 this_cpu_sub(nr_dentry_unused, freed);
1200 shrink_dentry_list(&dispose); 1223 shrink_dentry_list(&dispose);
1201 } while (list_lru_count(&sb->s_dentry_lru) > 0); 1224 } while (list_lru_count(&sb->s_dentry_lru) > 0);
1202} 1225}
@@ -1820,6 +1843,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1820 WARN_ON(d_in_lookup(dentry)); 1843 WARN_ON(d_in_lookup(dentry));
1821 1844
1822 spin_lock(&dentry->d_lock); 1845 spin_lock(&dentry->d_lock);
1846 /*
1847 * Decrement negative dentry count if it was in the LRU list.
1848 */
1849 if (dentry->d_flags & DCACHE_LRU_LIST)
1850 this_cpu_dec(nr_dentry_negative);
1823 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); 1851 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1824 raw_write_seqcount_begin(&dentry->d_seq); 1852 raw_write_seqcount_begin(&dentry->d_seq);
1825 __d_set_inode_and_type(dentry, inode, add_flags); 1853 __d_set_inode_and_type(dentry, inode, add_flags);
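The new counter is exported through the existing dentry_stat sysctl rather than a new interface. A small userspace sketch (not part of the patch; field order taken from the include/linux/dcache.h hunk later in this series) that reads it:

```c
#include <stdio.h>

int main(void)
{
	long nr_dentry, nr_unused, age_limit, want_pages, nr_negative, dummy;
	FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

	if (!f)
		return 1;
	/* fifth field is the new count of negative dentries on the LRU */
	if (fscanf(f, "%ld %ld %ld %ld %ld %ld", &nr_dentry, &nr_unused,
		   &age_limit, &want_pages, &nr_negative, &dummy) == 6)
		printf("negative dentries: %ld\n", nr_negative);
	fclose(f);
	return 0;
}
```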
diff --git a/fs/direct-io.c b/fs/direct-io.c
index dbc1a1f080ce..ec2fb6fe6d37 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -679,6 +679,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
 	unsigned long fs_count;	/* Number of filesystem-sized blocks */
 	int create;
 	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
+	loff_t i_size;
 
 	/*
 	 * If there was a memory error and we've overwritten all the
@@ -708,8 +709,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
 	 */
 	create = dio->op == REQ_OP_WRITE;
 	if (dio->flags & DIO_SKIP_HOLES) {
-		if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
-					i_blkbits))
+		i_size = i_size_read(dio->inode);
+		if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
 			create = 0;
 	}
 
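The `i_size &&` guard matters because with an empty file the old expression computed (loff_t)-1 >> i_blkbits == -1, and the comparison against an unsigned block number promoted that -1 to ULLONG_MAX, so `create` was cleared for every write. A standalone demonstration of the promotion (types mirror the kernel ones; the variables are illustrative):

```c
#include <stdio.h>

int main(void)
{
	long long i_size = 0;                 /* plays loff_t */
	unsigned long long fs_startblk = 7;   /* plays sector_t */
	unsigned int i_blkbits = 12;

	/* old test: -1 >> 12 is -1, promoted to ULLONG_MAX, always true */
	printf("old: %d\n", fs_startblk <= ((i_size - 1) >> i_blkbits));
	/* new test: short-circuits when i_size == 0 */
	printf("new: %d\n", i_size && fs_startblk <= (i_size - 1) >> i_blkbits);
	return 0;
}
```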
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 82377017130f..d31b6c72b476 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
 	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		spin_lock(&inode->i_lock);
+		/*
+		 * We must skip inodes in unusual state. We may also skip
+		 * inodes without pages but we deliberately won't in case
+		 * we need to reschedule to avoid softlockups.
+		 */
 		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
-		    (inode->i_mapping->nrpages == 0)) {
+		    (inode->i_mapping->nrpages == 0 && !need_resched())) {
 			spin_unlock(&inode->i_lock);
 			continue;
 		}
@@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
 		spin_unlock(&inode->i_lock);
 		spin_unlock(&sb->s_inode_list_lock);
 
+		cond_resched();
 		invalidate_mapping_pages(inode->i_mapping, 0, -1);
 		iput(toput_inode);
 		toput_inode = inode;
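For context, this loop only runs when userspace pokes the drop_caches control file; a superblock holding millions of clean, pageless inodes previously spun through it without ever rescheduling. A minimal, illustrative trigger:

```c
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/vm/drop_caches", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* drop clean page cache; walks every sb */
	close(fd);
	return 0;
}
```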
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b40168fcc94a..36855c1f8daf 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -331,11 +331,22 @@ struct inode_switch_wbs_context {
 	struct work_struct	work;
 };
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	down_write(&bdi->wb_switch_rwsem);
+}
+
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	up_write(&bdi->wb_switch_rwsem);
+}
+
 static void inode_switch_wbs_work_fn(struct work_struct *work)
 {
 	struct inode_switch_wbs_context *isw =
 		container_of(work, struct inode_switch_wbs_context, work);
 	struct inode *inode = isw->inode;
+	struct backing_dev_info *bdi = inode_to_bdi(inode);
 	struct address_space *mapping = inode->i_mapping;
 	struct bdi_writeback *old_wb = inode->i_wb;
 	struct bdi_writeback *new_wb = isw->new_wb;
@@ -344,6 +355,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 	bool switched = false;
 
 	/*
+	 * If @inode switches cgwb membership while sync_inodes_sb() is
+	 * being issued, sync_inodes_sb() might miss it. Synchronize.
+	 */
+	down_read(&bdi->wb_switch_rwsem);
+
+	/*
 	 * By the time control reaches here, RCU grace period has passed
 	 * since I_WB_SWITCH assertion and all wb stat update transactions
 	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
@@ -428,6 +445,8 @@ skip_switch:
 	spin_unlock(&new_wb->list_lock);
 	spin_unlock(&old_wb->list_lock);
 
+	up_read(&bdi->wb_switch_rwsem);
+
 	if (switched) {
 		wb_wakeup(new_wb);
 		wb_put(old_wb);
@@ -468,9 +487,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 	if (inode->i_state & I_WB_SWITCH)
 		return;
 
+	/*
+	 * Avoid starting new switches while sync_inodes_sb() is in
+	 * progress. Otherwise, if the down_write protected issue path
+	 * blocks heavily, we might end up starting a large number of
+	 * switches which will block on the rwsem.
+	 */
+	if (!down_read_trylock(&bdi->wb_switch_rwsem))
+		return;
+
 	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
 	if (!isw)
-		return;
+		goto out_unlock;
 
 	/* find and pin the new wb */
 	rcu_read_lock();
@@ -504,12 +532,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 	 */
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
-	return;
+	goto out_unlock;
 
 out_free:
 	if (isw->new_wb)
 		wb_put(isw->new_wb);
 	kfree(isw);
+out_unlock:
+	up_read(&bdi->wb_switch_rwsem);
 }
 
 /**
@@ -887,6 +917,9 @@ fs_initcall(cgroup_writeback_init);
 
 #else	/* CONFIG_CGROUP_WRITEBACK */
 
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+
 static struct bdi_writeback *
 locked_inode_to_wb_and_lock_list(struct inode *inode)
 	__releases(&inode->i_lock)
@@ -2413,8 +2446,11 @@ void sync_inodes_sb(struct super_block *sb)
 		return;
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
+	/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
+	bdi_down_write_wb_switch_rwsem(bdi);
 	bdi_split_work_to_wbs(bdi, &work, false);
 	wb_wait_for_completion(bdi, &done);
+	bdi_up_write_wb_switch_rwsem(bdi);
 
 	wait_sb_inodes(sb);
 }
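The scheme is a plain reader/writer split: every cgwb switch nests inside the rwsem for read, while sync_inodes_sb() takes it for write, so a sync observes either the old or the new wb, never a half-finished switch. A simplified sketch of the pattern (illustration only, not the kernel code):

```c
/* Switchers hold the sem for read; piling up behind a writer is avoided
 * with a trylock, exactly as inode_switch_wbs() does above. */
static void switch_one_inode(struct backing_dev_info *bdi)
{
	if (!down_read_trylock(&bdi->wb_switch_rwsem))
		return;			/* sync in flight: don't start more */
	/* ... move the inode between bdi_writebacks ... */
	up_read(&bdi->wb_switch_rwsem);
}

/* Syncers hold it for write, fencing out all in-flight switches. */
static void sync_all_inodes(struct backing_dev_info *bdi)
{
	down_write(&bdi->wb_switch_rwsem);
	/* ... issue writeback for every wb ... */
	up_write(&bdi->wb_switch_rwsem);
}
```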
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a5e516a40e7a..809c0f2f9942 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1742,7 +1742,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 	req->in.h.nodeid = outarg->nodeid;
 	req->in.numargs = 2;
 	req->in.argpages = 1;
-	req->page_descs[0].offset = offset;
 	req->end = fuse_retrieve_end;
 
 	index = outarg->offset >> PAGE_SHIFT;
@@ -1757,6 +1756,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 
 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
 		req->pages[req->num_pages] = page;
+		req->page_descs[req->num_pages].offset = offset;
 		req->page_descs[req->num_pages].length = this_num;
 		req->num_pages++;
 
@@ -2077,8 +2077,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 
 	ret = fuse_dev_do_write(fud, &cs, len);
 
+	pipe_lock(pipe);
 	for (idx = 0; idx < nbuf; idx++)
 		pipe_buf_release(pipe, &bufs[idx]);
+	pipe_unlock(pipe);
 
 out:
 	kvfree(bufs);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index ffaffe18352a..a59c16bd90ac 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1782,7 +1782,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
 	spin_unlock(&fc->lock);
 
 	dec_wb_stat(&bdi->wb, WB_WRITEBACK);
-	dec_node_page_state(page, NR_WRITEBACK_TEMP);
+	dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
 	wb_writeout_inc(&bdi->wb);
 	fuse_writepage_free(fc, new_req);
 	fuse_request_free(new_req);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 76baaa6be393..c2d4099429be 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -628,6 +628,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
 	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
 	fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
 	fc->user_ns = get_user_ns(user_ns);
+	fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
 }
 EXPORT_SYMBOL_GPL(fuse_conn_init);
 
@@ -1162,7 +1163,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 	fc->user_id = d.user_id;
 	fc->group_id = d.group_id;
 	fc->max_read = max_t(unsigned, 4096, d.max_read);
-	fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
 
 	/* Used by get_root_inode() */
 	sb->s_fs_info = fc;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 831d7cb5a49c..17a8d3b43990 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
 			goto next_iter;
 		}
 		if (ret == -E2BIG) {
-			n += rbm->bii - initial_bii;
 			rbm->bii = 0;
 			rbm->offset = 0;
+			n += (rbm->bii - initial_bii);
 			goto res_covered_end_of_rgrp;
 		}
 		return ret;
diff --git a/fs/iomap.c b/fs/iomap.c
index a3088fae567b..897c60215dd1 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page)
 	atomic_set(&iop->read_count, 0);
 	atomic_set(&iop->write_count, 0);
 	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+
+	/*
+	 * migrate_page_move_mapping() assumes that pages with private data
+	 * have their count elevated by 1.
+	 */
+	get_page(page);
 	set_page_private(page, (unsigned long)iop);
 	SetPagePrivate(page);
 	return iop;
@@ -132,6 +138,7 @@ iomap_page_release(struct page *page)
 	WARN_ON_ONCE(atomic_read(&iop->write_count));
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
+	put_page(page);
 	kfree(iop);
 }
 
@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
 
 	if (page_has_private(page)) {
 		ClearPagePrivate(page);
+		get_page(newpage);
 		set_page_private(newpage, page_private(page));
 		set_page_private(page, 0);
+		put_page(page);
 		SetPagePrivate(newpage);
 	}
 
@@ -1804,6 +1813,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	loff_t pos = iocb->ki_pos, start = pos;
 	loff_t end = iocb->ki_pos + count - 1, ret = 0;
 	unsigned int flags = IOMAP_DIRECT;
+	bool wait_for_completion = is_sync_kiocb(iocb);
 	struct blk_plug plug;
 	struct iomap_dio *dio;
 
@@ -1823,7 +1833,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	dio->end_io = end_io;
 	dio->error = 0;
 	dio->flags = 0;
-	dio->wait_for_completion = is_sync_kiocb(iocb);
 
 	dio->submit.iter = iter;
 	dio->submit.waiter = current;
@@ -1878,7 +1887,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		dio_warn_stale_pagecache(iocb->ki_filp);
 	ret = 0;
 
-	if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
+	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
 	    !inode->i_sb->s_dio_done_wq) {
 		ret = sb_init_dio_done_wq(inode->i_sb);
 		if (ret < 0)
@@ -1894,7 +1903,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 		if (ret <= 0) {
 			/* magic error code to fall back to buffered I/O */
 			if (ret == -ENOTBLK) {
-				dio->wait_for_completion = true;
+				wait_for_completion = true;
 				ret = 0;
 			}
 			break;
@@ -1916,8 +1925,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 	if (dio->flags & IOMAP_DIO_WRITE_FUA)
 		dio->flags &= ~IOMAP_DIO_NEED_SYNC;
 
+	/*
+	 * We are about to drop our additional submission reference, which
+	 * might be the last reference to the dio.  There are three different
+	 * ways we can progress here:
+	 *
+	 *  (a) If this is the last reference we will always complete and free
+	 *	the dio ourselves.
+	 *  (b) If this is not the last reference, and we serve an asynchronous
+	 *	iocb, we must never touch the dio after the decrement, the
+	 *	I/O completion handler will complete and free it.
+	 *  (c) If this is not the last reference, but we serve a synchronous
+	 *	iocb, the I/O completion handler will wake us up on the drop
+	 *	of the final reference, and we will complete and free it here
+	 *	after we got woken by the I/O completion handler.
+	 */
+	dio->wait_for_completion = wait_for_completion;
 	if (!atomic_dec_and_test(&dio->ref)) {
-		if (!dio->wait_for_completion)
+		if (!wait_for_completion)
 			return -EIOCBQUEUED;
 
 		for (;;) {
@@ -1934,9 +1959,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
 			__set_current_state(TASK_RUNNING);
 		}
 
-	ret = iomap_dio_complete(dio);
-
-	return ret;
+	return iomap_dio_complete(dio);
 
 out_free_dio:
 	kfree(dio);
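The get_page()/put_page() pairs in the first three hunks encode the convention that a page carrying ->private holds one extra reference, which migrate_page_move_mapping() expects. Condensed into hypothetical attach/detach helpers (illustration of the pattern, not functions the patch adds):

```c
/* a page with private data owns one extra reference */
static void attach_page_iop(struct page *page, struct iomap_page *iop)
{
	get_page(page);			/* reference owned by ->private */
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
}

static struct iomap_page *detach_page_iop(struct page *page)
{
	struct iomap_page *iop = (struct iomap_page *)page_private(page);

	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);			/* drop the ->private reference */
	return iop;
}
```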
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 22ce3c8a2f46..0570391eaa16 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1895,6 +1895,11 @@ static int nfs_parse_devname(const char *dev_name,
 	size_t len;
 	char *end;
 
+	if (unlikely(!dev_name || !*dev_name)) {
+		dfprintk(MOUNT, "NFS: device name not specified\n");
+		return -EINVAL;
+	}
+
 	/* Is the host name protected with square brackets? */
 	if (*dev_name == '[') {
 		end = strchr(++dev_name, ']');
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5a0bbf917a32..f12cb31a41e5 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 	nfs_set_page_writeback(page);
 	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
 
-	ret = 0;
+	ret = req->wb_context->error;
 	/* If there is a fatal error that covers this write, just exit */
-	if (nfs_error_is_fatal_on_server(req->wb_context->error))
+	if (nfs_error_is_fatal_on_server(ret))
 		goto out_launder;
 
+	ret = 0;
 	if (!nfs_pageio_add_request(pgio, req)) {
 		ret = pgio->pg_error;
 		/*
@@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 			nfs_context_set_write_error(req->wb_context, ret);
 			if (nfs_error_is_fatal_on_server(ret))
 				goto out_launder;
-		}
+		} else
+			ret = -EAGAIN;
 		nfs_redirty_request(req);
-		ret = -EAGAIN;
 	} else
 		nfs_add_stats(page_file_mapping(page)->host,
 			      NFSIOS_WRITEPAGES, 1);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 9824e32b2f23..7dc98e14655d 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
 	loff_t cloned;
 
 	cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
+	if (cloned < 0)
+		return nfserrno(cloned);
 	if (count && cloned != count)
-		cloned = -EINVAL;
-	return nfserrno(cloned < 0 ? cloned : 0);
+		return nfserrno(-EINVAL);
+	return 0;
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 105576daca4a..798f1253141a 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
 		return -EBADF;
 
 	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
-	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE)))
-		return -EINVAL;
+	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
+		ret = -EINVAL;
+		goto fput_and_out;
+	}
 
 	/* verify that this is indeed an inotify instance */
 	if (unlikely(f.file->f_op != &inotify_fops)) {
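From userspace the invalid combination still fails the same way; the fix only ensures the fdget() reference is dropped on the error path instead of leaking. Assuming your libc headers define IN_MASK_CREATE:

```c
#include <errno.h>
#include <stdio.h>
#include <sys/inotify.h>

int main(void)
{
	int fd = inotify_init1(IN_CLOEXEC);
	int wd = inotify_add_watch(fd, "/tmp",
				   IN_CREATE | IN_MASK_ADD | IN_MASK_CREATE);

	if (wd < 0 && errno == EINVAL)
		printf("rejected as expected\n");
	return 0;
}
```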
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 8ae109429a88..e39bac94dead 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
 		inode = proc_get_inode(dir->i_sb, de);
 		if (!inode)
 			return ERR_PTR(-ENOMEM);
-		d_set_d_op(dentry, &proc_misc_dentry_ops);
+		d_set_d_op(dentry, de->proc_dops);
 		return d_splice_alias(inode, dentry);
 	}
 	read_unlock(&proc_subdir_lock);
@@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
 	INIT_LIST_HEAD(&ent->pde_openers);
 	proc_set_user(ent, (*parent)->uid, (*parent)->gid);
 
+	ent->proc_dops = &proc_misc_dentry_ops;
+
 out:
 	return ent;
 }
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5185d7f6a51e..95b14196f284 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -44,6 +44,7 @@ struct proc_dir_entry {
 	struct completion *pde_unload_completion;
 	const struct inode_operations *proc_iops;
 	const struct file_operations *proc_fops;
+	const struct dentry_operations *proc_dops;
 	union {
 		const struct seq_operations *seq_ops;
 		int (*single_show)(struct seq_file *, void *);
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index d5e0fcb3439e..a7b12435519e 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode)
 	return maybe_get_net(PDE_NET(PDE(inode)));
 }
 
+static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	return 0;
+}
+
+static const struct dentry_operations proc_net_dentry_ops = {
+	.d_revalidate	= proc_net_d_revalidate,
+	.d_delete	= always_delete_dentry,
+};
+
+static void pde_force_lookup(struct proc_dir_entry *pde)
+{
+	/* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
+	pde->proc_dops = &proc_net_dentry_ops;
+}
+
 static int seq_open_net(struct inode *inode, struct file *file)
 {
 	unsigned int state_size = PDE(inode)->state_size;
@@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_seq_fops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
@@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_seq_fops;
 	p->seq_ops = ops;
 	p->state_size = state_size;
@@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_single_fops;
 	p->single_show = show;
 	return proc_register(parent, p);
@@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
 	p = proc_create_reg(name, mode, &parent, data);
 	if (!p)
 		return NULL;
+	pde_force_lookup(p);
 	p->proc_fops = &proc_net_single_fops;
 	p->single_show = show;
 	p->write = write;
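Because all four creation helpers now call pde_force_lookup(), existing users pick up the always-revalidate behaviour without changes. A hypothetical module snippet using one of the patched helpers:

```c
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>

static int hello_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int __net_init hello_net_init(struct net *net)
{
	/* dentries for this entry now carry proc_net_dentry_ops */
	if (!proc_create_net_single("hello", 0444, net->proc_net,
				    hello_show, NULL))
		return -ENOMEM;
	return 0;
}
```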
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 535eda7857cf..76175211b304 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -79,6 +79,31 @@ static u64 get_iowait_time(int cpu)
 
 #endif
 
+static void show_irq_gap(struct seq_file *p, unsigned int gap)
+{
+	static const char zeros[] = " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0";
+
+	while (gap > 0) {
+		unsigned int inc;
+
+		inc = min_t(unsigned int, gap, ARRAY_SIZE(zeros) / 2);
+		seq_write(p, zeros, 2 * inc);
+		gap -= inc;
+	}
+}
+
+static void show_all_irqs(struct seq_file *p)
+{
+	unsigned int i, next = 0;
+
+	for_each_active_irq(i) {
+		show_irq_gap(p, i - next);
+		seq_put_decimal_ull(p, " ", kstat_irqs_usr(i));
+		next = i + 1;
+	}
+	show_irq_gap(p, nr_irqs - next);
+}
+
 static int show_stat(struct seq_file *p, void *v)
 {
 	int i, j;
@@ -156,9 +181,7 @@ static int show_stat(struct seq_file *p, void *v)
 	}
 	seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
 
-	/* sum again ? it could be updated? */
-	for_each_irq_nr(j)
-		seq_put_decimal_ull(p, " ", kstat_irqs_usr(j));
+	show_all_irqs(p);
 
 	seq_printf(p,
 		"\nctxt %llu\n"
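show_irq_gap() fills the holes between active interrupts with runs of " 0", at most 16 entries (ARRAY_SIZE(zeros) / 2) per seq_write(). A userspace port of the same chunking arithmetic, for illustration:

```c
#include <stdio.h>

static void show_irq_gap(unsigned int gap)
{
	static const char zeros[] = " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0";

	while (gap > 0) {
		/* at most 16 " 0" pairs per write, exactly as above */
		unsigned int inc = gap < sizeof(zeros) / 2 ? gap
							   : sizeof(zeros) / 2;

		fwrite(zeros, 2, inc, stdout);
		gap -= inc;
	}
}

int main(void)
{
	show_irq_gap(40);	/* emits the 40 zeros as 16 + 16 + 8 */
	putchar('\n');
	return 0;
}
```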
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 7b24fc791146..228a5e234af0 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -71,7 +71,6 @@
 #define MMP2_CLK_CCIC1_MIX	117
 #define MMP2_CLK_CCIC1_PHY	118
 #define MMP2_CLK_CCIC1_SPHY	119
-#define MMP2_CLK_SP		120
 
 #define MMP2_NR_CLKS		200
 #endif
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c31157135598..07e02d6df5ad 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -190,6 +190,7 @@ struct backing_dev_info {
 	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
 	struct rb_root cgwb_congested_tree; /* their congested states */
 	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
+	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
 #else
 	struct bdi_writeback_congested *wb_congested;
 #endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 5c7e7f859a24..d66bf5f32610 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -287,7 +287,7 @@ enum req_opf {
 	REQ_OP_DISCARD		= 3,
 	/* securely erase sectors */
 	REQ_OP_SECURE_ERASE	= 5,
-	/* seset a zone write pointer */
+	/* reset a zone write pointer */
 	REQ_OP_ZONE_RESET	= 6,
 	/* write the same sector many times */
 	REQ_OP_WRITE_SAME	= 7,
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 218df7f4d3e1..5041357d0297 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
 #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
 extern enum cpuhp_smt_control cpu_smt_control;
 extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology_early(void);
 extern void cpu_smt_check_topology(void);
 #else
 # define cpu_smt_control		(CPU_SMT_ENABLED)
 static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology_early(void) { }
 static inline void cpu_smt_check_topology(void) { }
 #endif
 
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index ef4b70f64f33..60996e64c579 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -62,9 +62,10 @@ extern const struct qstr slash_name;
 struct dentry_stat_t {
 	long nr_dentry;
 	long nr_unused;
 	long age_limit;		/* age in seconds */
 	long want_pages;	/* pages requested by system */
-	long dummy[2];
+	long nr_negative;	/* # of unused negative dentries */
+	long dummy;		/* Reserved for future use */
 };
 extern struct dentry_stat_t dentry_stat;
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 811c77743dad..29d8e2cfed0e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1479,11 +1479,12 @@ struct super_block {
 	struct user_namespace *s_user_ns;
 
 	/*
-	 * Keep the lru lists last in the structure so they always sit on their
-	 * own individual cachelines.
+	 * The list_lru structure is essentially just a pointer to a table
+	 * of per-node lru lists, each of which has its own spinlock.
+	 * There is no need to put them into separate cachelines.
 	 */
-	struct list_lru		s_dentry_lru ____cacheline_aligned_in_smp;
-	struct list_lru		s_inode_lru ____cacheline_aligned_in_smp;
+	struct list_lru		s_dentry_lru;
+	struct list_lru		s_inode_lru;
 	struct rcu_head		rcu;
 	struct work_struct	destroy_work;
 
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8663f216c563..2d6100edf204 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -24,7 +24,10 @@
 
 #ifdef CONFIG_DEBUG_FS
 
+#include <linux/kfifo.h>
+
 #define HID_DEBUG_BUFSIZE 512
+#define HID_DEBUG_FIFOSIZE 512
 
 void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
 void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +40,8 @@ void hid_debug_init(void);
 void hid_debug_exit(void);
 void hid_debug_event(struct hid_device *, char *);
 
-
 struct hid_debug_list {
-	char *hid_debug_buf;
-	int head;
-	int tail;
+	DECLARE_KFIFO_PTR(hid_debug_fifo, char);
 	struct fasync_struct *fasync;
 	struct hid_device *hdev;
 	struct list_head node;
@@ -64,4 +64,3 @@ struct hid_debug_list {
 #endif
 
 #endif
-
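The open-coded ring buffer (buf/head/tail) becomes a kfifo. A hedged sketch of how the producer and consumer sides map onto the kfifo API; the real users live in drivers/hid/hid-debug.c, and these helper names are invented:

```c
/* producer side, roughly what hid_debug_event() becomes */
static void push_event(struct hid_debug_list *list,
		       const char *buf, unsigned int len)
{
	kfifo_in(&list->hid_debug_fifo, buf, len);
}

/* consumer side, draining to userspace from a read() handler */
static ssize_t pop_events(struct hid_debug_list *list,
			  char __user *buf, size_t count)
{
	unsigned int copied;
	int ret = kfifo_to_user(&list->hid_debug_fifo, buf, count, &copied);

	return ret ? ret : copied;
}
```

The fifo itself would be set up once per reader with kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL) and torn down with kfifo_free().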
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d99287327ef2..f9707d1dcb58 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -430,7 +430,7 @@ struct hid_local {
  */
 
 struct hid_collection {
-	struct hid_collection *parent;
+	int parent_idx; /* device->collection */
 	unsigned type;
 	unsigned usage;
 	unsigned level;
@@ -658,7 +658,6 @@ struct hid_parser {
 	unsigned int *collection_stack;
 	unsigned int collection_stack_ptr;
 	unsigned int collection_stack_size;
-	struct hid_collection *active_collection;
 	struct hid_device *device;
 	unsigned int scan_flags;
 };
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f0885cc01db6..dcb6977afce9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1159,8 +1159,9 @@ struct hv_ring_buffer_debug_info {
 	u32 bytes_avail_towrite;
 };
 
-void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
-			    struct hv_ring_buffer_debug_info *debug_info);
+
+int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+				struct hv_ring_buffer_debug_info *debug_info);
 
 /* Vmbus interface */
 #define vmbus_driver_register(driver)	\
diff --git a/include/linux/ide.h b/include/linux/ide.h
index e7d29ae633cd..971cf76a78a0 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -615,6 +615,7 @@ struct ide_drive_s {
 
 	/* current sense rq and buffer */
 	bool sense_rq_armed;
+	bool sense_rq_active;
 	struct request *sense_rq;
 	struct request_sense sense_data;
 
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
 extern void ide_timer_expiry(struct timer_list *t);
 extern irqreturn_t ide_intr(int irq, void *dev_id);
 extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
 
 void ide_init_disk(struct gendisk *, ide_drive_t *);
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 6756fea18b69..e44746de95cd 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
 	case ARPHRD_IPGRE:
 	case ARPHRD_VOID:
 	case ARPHRD_NONE:
+	case ARPHRD_RAWIP:
 		return false;
 	default:
 		return true;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 831ddcdc5597..690b238a44d5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -259,25 +259,35 @@ struct irq_affinity_notify {
 	void (*release)(struct kref *ref);
 };
 
+#define	IRQ_AFFINITY_MAX_SETS  4
+
 /**
  * struct irq_affinity - Description for automatic irq affinity assignements
  * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
  *			the MSI(-X) vector space
  * @post_vectors:	Don't apply affinity to @post_vectors at end of
  *			the MSI(-X) vector space
- * @nr_sets:		Length of passed in *sets array
- * @sets:		Number of affinitized sets
+ * @nr_sets:		The number of interrupt sets for which affinity
+ *			spreading is required
+ * @set_size:		Array holding the size of each interrupt set
+ * @calc_sets:		Callback for calculating the number and size
+ *			of interrupt sets
+ * @priv:		Private data for usage by @calc_sets, usually a
+ *			pointer to driver/device specific data.
  */
 struct irq_affinity {
-	int	pre_vectors;
-	int	post_vectors;
-	int	nr_sets;
-	int	*sets;
+	unsigned int	pre_vectors;
+	unsigned int	post_vectors;
+	unsigned int	nr_sets;
+	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
+	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+	void		*priv;
 };
 
 /**
  * struct irq_affinity_desc - Interrupt affinity descriptor
  * @mask:	cpumask to hold the affinity assignment
+ * @is_managed:	1 if the interrupt is managed internally
  */
 struct irq_affinity_desc {
 	struct cpumask	mask;
@@ -331,9 +341,10 @@ extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
 struct irq_affinity_desc *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
 
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+				       const struct irq_affinity *affd);
 
 #else /* CONFIG_SMP */
 
@@ -367,13 +378,14 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 }
 
 static inline struct irq_affinity_desc *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
 {
 	return NULL;
 }
 
-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+			  const struct irq_affinity *affd)
 {
 	return maxvec;
 }
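With @calc_sets, a driver describes its vector sets via a callback instead of a preallocated array. A hypothetical NVMe-style user, sketched against the pci.h hunk below (split policy and names invented for illustration):

```c
#include <linux/interrupt.h>
#include <linux/pci.h>

/* hypothetical policy: one admin vector, the rest split evenly */
static void my_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	affd->nr_sets = 2;
	affd->set_size[0] = nvecs / 2;		/* read queues */
	affd->set_size[1] = nvecs - nvecs / 2;	/* write queues */
}

static int my_setup_irqs(struct pci_dev *pdev)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,		/* admin vector, not spread */
		.calc_sets	= my_calc_sets,
	};

	return pci_alloc_irq_vectors_affinity(pdev, 2, 64,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &affd);
}
```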
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index ba05b0d6401a..d6e2ab538ef2 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -28,6 +28,7 @@ struct pt_regs;
  * @core_internal_state__do_not_mess_with_it: core internal status information
  * @depth:		disable-depth, for nested irq_disable() calls
  * @wake_depth:		enable depth, for multiple irq_set_irq_wake() callers
+ * @tot_count:		stats field for non-percpu irqs
  * @irq_count:		stats field to detect stalled irqs
  * @last_unhandled:	aging timer for unhandled count
  * @irqs_unhandled:	stats field for spurious unhandled interrupts
@@ -65,6 +66,7 @@ struct irq_desc {
 	unsigned int		core_internal_state__do_not_mess_with_it;
 	unsigned int		depth;		/* nested irq disables */
 	unsigned int		wake_depth;	/* nested wake enables */
+	unsigned int		tot_count;
 	unsigned int		irq_count;	/* For detecting broken IRQs */
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int		irqs_unhandled;
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c1961761311d..1577a2d56e9d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -56,6 +56,7 @@ void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
 bool kthread_should_stop(void);
 bool kthread_should_park(void);
+bool __kthread_should_park(struct task_struct *k);
 bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
 void *kthread_probe_data(struct task_struct *k);
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 7315977b64da..ad609617aeb8 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -235,7 +235,6 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
 			cmd_mask, num_flush, flush_wpq, NULL, NULL);
 }
 
-int nvdimm_security_setup_events(struct nvdimm *nvdimm);
 const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
 const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
 u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 07da5c6c5ba0..368267c1b71b 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -21,14 +21,16 @@ struct vmem_altmap;
  * walkers which rely on the fully initialized page->flags and others
  * should use this rather than pfn_valid && pfn_to_page
  */
 #define pfn_to_online_page(pfn)					   \
 ({								   \
 	struct page *___page = NULL;				   \
-	unsigned long ___nr = pfn_to_section_nr(pfn);		   \
-								   \
-	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))   \
-		___page = pfn_to_page(pfn);			   \
-	___page;						   \
+	unsigned long ___pfn = pfn;				   \
+	unsigned long ___nr = pfn_to_section_nr(___pfn);	   \
+								   \
+	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
+	    pfn_valid_within(___pfn))				   \
+		___page = pfn_to_page(___pfn);			   \
+	___page;						   \
 })
 
 /*
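Latching the argument into ___pfn also makes the macro single-evaluation safe. A userspace illustration of the hazard the temporary removes (the macros here are hypothetical; the second uses a GNU C statement expression, as the kernel macro does):

```c
#include <stdio.h>

#define TWICE_BAD(x)	((x) + (x))	/* argument evaluated twice */
#define TWICE_GOOD(x)	({ unsigned long __v = (x); __v + __v; })

static unsigned long calls;

static unsigned long next_pfn(void)
{
	calls++;
	return 42;
}

int main(void)
{
	calls = 0;
	(void)TWICE_BAD(next_pfn());
	printf("bad:  %lu calls\n", calls);	/* 2 */

	calls = 0;
	(void)TWICE_GOOD(next_pfn());
	printf("good: %lu calls\n", calls);	/* 1 */
	return 0;
}
```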
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 65f1d8c2f082..e7c51b00cdfe 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1393,7 +1393,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
 }
 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 				   unsigned int max_vecs, unsigned int flags,
-				   const struct irq_affinity *affd);
+				   struct irq_affinity *affd);
 
 void pci_free_irq_vectors(struct pci_dev *dev);
 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
@@ -1419,7 +1419,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
 static inline int
 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 			       unsigned int max_vecs, unsigned int flags,
-			       const struct irq_affinity *aff_desc)
+			       struct irq_affinity *aff_desc)
 {
 	if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
 		return 1;
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 54af4eef169f..fed5be706bc9 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
 
 static inline void pm_runtime_mark_last_busy(struct device *dev)
 {
-	WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
+	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
 }
 
 static inline bool pm_runtime_is_irq_safe(struct device *dev)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2f90fa92468..bba3afb4e9bf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -995,7 +995,7 @@ struct task_struct {
 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
 	struct list_head		cg_list;
 #endif
-#ifdef CONFIG_X86_RESCTRL
+#ifdef CONFIG_X86_CPU_RESCTRL
 	u32				closid;
 	u32				rmid;
 #endif
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ec912d01126f..ecdc6542070f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
 #define MMF_OOM_VICTIM		25	/* mm is the oom victim */
+#define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
 #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 10b19a192b2d..545f37138057 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -24,9 +24,13 @@
  * called near the end of a function. Otherwise, the list can be
  * re-initialized for later re-use by wake_q_init().
  *
- * Note that this can cause spurious wakeups. schedule() callers
+ * NOTE that this can cause spurious wakeups. schedule() callers
  * must ensure the call is done inside a loop, confirming that the
  * wakeup condition has in fact occurred.
+ *
+ * NOTE that there is no guarantee the wakeup will happen any later than the
+ * wake_q_add() location. Therefore task must be ready to be woken at the
+ * location of the wake_q_add().
  */
 
 #include <linux/sched.h>
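The canonical consumer loop the comment demands, with `cond` standing in for the caller's wakeup predicate (a sketch of the idiom, not code from this patch):

```c
/* tolerate spurious and early wakeups: re-check the condition each pass */
static void wait_for_cond(bool (*cond)(void))
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (cond())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
```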
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index f492e21c4aa2..5d9d318bcf7a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -176,7 +176,8 @@ static inline bool xa_is_internal(const void *entry)
176 */ 176 */
177static inline bool xa_is_err(const void *entry) 177static inline bool xa_is_err(const void *entry)
178{ 178{
179 return unlikely(xa_is_internal(entry)); 179 return unlikely(xa_is_internal(entry) &&
180 entry >= xa_mk_internal(-MAX_ERRNO));
180} 181}
181 182
182/** 183/**
@@ -286,7 +287,6 @@ struct xarray {
286 */ 287 */
287#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) 288#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
288 289
289void xa_init_flags(struct xarray *, gfp_t flags);
290void *xa_load(struct xarray *, unsigned long index); 290void *xa_load(struct xarray *, unsigned long index);
291void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); 291void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
292void *xa_erase(struct xarray *, unsigned long index); 292void *xa_erase(struct xarray *, unsigned long index);
@@ -304,6 +304,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
304void xa_destroy(struct xarray *); 304void xa_destroy(struct xarray *);
305 305
306/** 306/**
307 * xa_init_flags() - Initialise an empty XArray with flags.
308 * @xa: XArray.
309 * @flags: XA_FLAG values.
310 *
311 * If you need to initialise an XArray with special flags (eg you need
312 * to take the lock from interrupt context), use this function instead
313 * of xa_init().
314 *
315 * Context: Any context.
316 */
317static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
318{
319 spin_lock_init(&xa->xa_lock);
320 xa->xa_flags = flags;
321 xa->xa_head = NULL;
322}
323
324/**
307 * xa_init() - Initialise an empty XArray. 325 * xa_init() - Initialise an empty XArray.
308 * @xa: XArray. 326 * @xa: XArray.
309 * 327 *
@@ -342,20 +360,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
 }
 
 /**
- * xa_for_each() - Iterate over a portion of an XArray.
+ * xa_for_each_start() - Iterate over a portion of an XArray.
  * @xa: XArray.
+ * @index: Index of @entry.
  * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you
+ * want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_start() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_start().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_start(xa, index, entry, start) \
+	for (index = start, \
+	     entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \
+	     entry; \
+	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+
+/**
+ * xa_for_each() - Iterate over present entries in an XArray.
+ * @xa: XArray.
  * @index: Index of @entry.
- * @max: Maximum index to retrieve from array.
- * @filter: Selection criterion.
+ * @entry: Entry retrieved from array.
  *
- * Initialise @index to the lowest index you want to retrieve from the
- * array. During the iteration, @entry will have the value of the entry
- * stored in @xa at @index. The iteration will skip all entries in the
- * array which do not match @filter. You may modify @index during the
- * iteration if you want to skip or reprocess indices. It is safe to modify
- * the array during the iteration. At the end of the iteration, @entry will
- * be set to NULL and @index will have a value less than or equal to max.
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you want
+ * to skip or reprocess indices. It is safe to modify the array during the
+ * iteration. At the end of the iteration, @entry will be set to NULL and
+ * @index will have a value less than or equal to max.
  *
  * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
  * to handle your own locking with xas_for_each(), and if you have to unlock
@@ -366,9 +409,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  *
  * Context: Any context. Takes and releases the RCU lock.
  */
-#define xa_for_each(xa, entry, index, max, filter) \
-	for (entry = xa_find(xa, &index, max, filter); entry; \
-	     entry = xa_find_after(xa, &index, max, filter))
+#define xa_for_each(xa, index, entry) \
+	xa_for_each_start(xa, index, entry, 0)
+
+/**
+ * xa_for_each_marked() - Iterate over marked entries in an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @filter: Selection criterion.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. The iteration will skip all entries in the array
+ * which do not match @filter. You may modify @index during the iteration
+ * if you want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set to
+ * NULL and @index will have a value less than or equal to max.
+ *
+ * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
+ * You have to handle your own locking with xas_for_each(), and if you have
+ * to unlock after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each_marked() iterator
+ * instead. The xas_for_each_marked() iterator will expand into more inline
+ * code than xa_for_each_marked().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_marked(xa, index, entry, filter) \
+	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
+	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
 
 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa) spin_lock(&(xa)->xa_lock)
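A minimal sketch of the reworked iterators (illustration only, not part of this diff; `my_xa` and `process()` are hypothetical):

	unsigned long index;
	void *entry;

	/* Visit every present entry, starting at index 0. */
	xa_for_each(&my_xa, index, entry)
		process(entry);

	/* Resume a walk at index 64, e.g. after dropping a lock. */
	xa_for_each_start(&my_xa, index, entry, 64)
		process(entry);

	/* Visit only entries carrying mark 0. */
	xa_for_each_marked(&my_xa, index, entry, XA_MARK_0)
		process(entry);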
@@ -393,40 +463,13 @@ void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 		void *entry, gfp_t);
+int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
 int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
 int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
 
 /**
- * __xa_insert() - Store this entry in the XArray unless another entry is
- * already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use __xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Any context. Expects xa_lock to be held on entry. May
- * release and reacquire xa_lock if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int __xa_insert(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
-{
-	void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
-	if (!curr)
-		return 0;
-	if (xa_is_err(curr))
-		return xa_err(curr);
-	return -EEXIST;
-}
-
-/**
  * xa_store_bh() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
@@ -453,7 +496,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
 }
 
 /**
- * xa_store_irq() - Erase this entry from the XArray.
+ * xa_store_irq() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
  * @entry: New entry.
@@ -615,24 +658,83 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
  * @entry: New entry.
  * @gfp: Memory allocation flags.
  *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
  *
- * Context: Process context. Takes and releases the xa_lock.
- * May sleep if the @gfp flags permit.
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
+ * the @gfp flags permit.
  * Return: 0 if the store succeeded. -EEXIST if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
 static inline int xa_insert(struct xarray *xa, unsigned long index,
 		void *entry, gfp_t gfp)
 {
-	void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
-	if (!curr)
-		return 0;
-	if (xa_is_err(curr))
-		return xa_err(curr);
-	return -EEXIST;
+	int err;
+
+	xa_lock(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock(xa);
+
+	return err;
+}
+
+/**
+ * xa_insert_bh() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_bh(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock_bh(xa);
+
+	return err;
+}
+
+/**
+ * xa_insert_irq() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
+		void *entry, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_irq(xa);
+	err = __xa_insert(xa, index, entry, gfp);
+	xa_unlock_irq(xa);
+
+	return err;
 }
 
 /**
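To show the error contract described above (a hedged sketch, not part of this diff; `my_xa`, `id` and `ptr` are hypothetical):

	/* xa_insert() now locks internally and forwards __xa_insert()'s result. */
	int err = xa_insert(&my_xa, id, ptr, GFP_KERNEL);

	if (err == -EEXIST)
		pr_warn("index %lu already occupied\n", id);	/* includes reserved entries */
	else if (err == -ENOMEM)
		return err;	/* allocation failed */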
@@ -970,8 +1072,8 @@ static inline bool xa_is_sibling(const void *entry)
 		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
 }
 
-#define XA_ZERO_ENTRY		xa_mk_internal(256)
-#define XA_RETRY_ENTRY		xa_mk_internal(257)
+#define XA_RETRY_ENTRY		xa_mk_internal(256)
+#define XA_ZERO_ENTRY		xa_mk_internal(257)
 
 /**
  * xa_is_zero() - Is the entry a zero entry?
@@ -996,6 +1098,17 @@ static inline bool xa_is_retry(const void *entry)
 }
 
 /**
+ * xa_is_advanced() - Is the entry only permitted for the advanced API?
+ * @entry: Entry to be stored in the XArray.
+ *
+ * Return: %true if the entry cannot be stored by the normal API.
+ */
+static inline bool xa_is_advanced(const void *entry)
+{
+	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
+}
+
+/**
  * typedef xa_update_node_t - A callback function from the XArray.
  * @node: The node which is being processed
  *
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 3f9aea8087e3..8b7eb46ad72d 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
 
 void __ax25_put_route(ax25_route *ax25_rt);
 
+extern rwlock_t ax25_route_lock;
+
+static inline void ax25_route_lock_use(void)
+{
+	read_lock(&ax25_route_lock);
+}
+
+static inline void ax25_route_lock_unuse(void)
+{
+	read_unlock(&ax25_route_lock);
+}
+
 static inline void ax25_put_route(ax25_route *ax25_rt)
 {
 	if (refcount_dec_and_test(&ax25_rt->refcount))
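The pair above simply wraps the route read lock; a caller would bracket any lookup and use of an ax25_route with it (a hedged sketch, not part of this diff):

	ax25_route_lock_use();
	/* ... look up an ax25_route and dereference it safely here ... */
	ax25_route_lock_unuse();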
diff --git a/include/net/tls.h b/include/net/tls.h
index 2a6ac8d642af..1486b60c4de8 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -120,6 +120,8 @@ struct tls_rec {
 	struct scatterlist sg_aead_out[2];
 
 	char aad_space[TLS_AAD_SPACE_SIZE];
+	u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE +
+		   TLS_CIPHER_AES_GCM_128_SALT_SIZE];
 	struct aead_request aead_req;
 	u8 aead_req_ctx[];
 };
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a3ceed3a040a..80debf5982ac 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2579,9 +2579,10 @@ struct ib_device {
 
 	const struct uapi_definition   *driver_def;
 	enum rdma_driver_id		driver_id;
+
 	/*
-	 * Provides synchronization between device unregistration and netlink
-	 * commands on a device. To be used only by core.
+	 * Positive refcount indicates that the device is currently
+	 * registered and cannot be unregistered.
 	 */
 	refcount_t refcount;
 	struct completion unreg_completion;
@@ -3926,6 +3927,25 @@ static inline bool ib_access_writable(int access_flags)
 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
 		       struct ib_mr_status *mr_status);
 
+/**
+ * ib_device_try_get: Hold a registration lock
+ * device: The device to lock
+ *
+ * A device under an active registration lock cannot become unregistered. It
+ * is only possible to obtain a registration lock on a device that is fully
+ * registered, otherwise this function returns false.
+ *
+ * The registration lock is only necessary for actions which require the
+ * device to still be registered. Uses that only require the device pointer to
+ * be valid should use get_device(&ibdev->dev) to hold the memory.
+ *
+ */
+static inline bool ib_device_try_get(struct ib_device *dev)
+{
+	return refcount_inc_not_zero(&dev->refcount);
+}
+
+void ib_device_put(struct ib_device *device);
 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
 					    u16 pkey, const union ib_gid *gid,
 					    const struct sockaddr *addr);
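The comment above translates into a simple try-get/put pattern (a hedged sketch, not part of this diff; do_work() is hypothetical):

	if (ib_device_try_get(ibdev)) {
		do_work(ibdev);		/* the device cannot unregister here */
		ib_device_put(ibdev);
	}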
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 0cdc3999ecfa..c5188ff724d1 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -173,7 +173,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
 	if (snd_BUG_ON(!stream))
 		return;
 
-	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	if (stream->direction == SND_COMPRESS_PLAYBACK)
+		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	else
+		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+
 	wake_up(&stream->runtime->sleep);
 }
 
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 7fa48b100936..cc7c8d42d4fd 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -68,6 +68,7 @@ struct hda_bus {
 	unsigned int response_reset:1;	/* controller was reset */
 	unsigned int in_reset:1;	/* during reset operation */
 	unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
+	unsigned int bus_probing :1;	/* during probing process */
 
 	int primary_dig_out_type;	/* primary digital out PCM type */
 	unsigned int mixer_assigned;	/* codec addr for mixer name */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 8ec1de856ee7..e665f111b0d2 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -985,6 +985,12 @@ struct snd_soc_dai_link {
 	/* Do not create a PCM for this DAI link (Backend link) */
 	unsigned int ignore:1;
 
+	/*
+	 * This driver uses legacy platform naming. Set by the core, machine
+	 * drivers should not modify this value.
+	 */
+	unsigned int legacy_platform:1;
+
 	struct list_head list; /* DAI link list of the soc card */
 	struct snd_soc_dobj dobj; /* For topology */
 };
diff --git a/include/uapi/linux/android/binder_ctl.h b/include/uapi/linux/android/binderfs.h
index 65b2efd1a0a5..87410477aea9 100644
--- a/include/uapi/linux/android/binder_ctl.h
+++ b/include/uapi/linux/android/binderfs.h
@@ -4,8 +4,8 @@
  *
  */
 
-#ifndef _UAPI_LINUX_BINDER_CTL_H
-#define _UAPI_LINUX_BINDER_CTL_H
+#ifndef _UAPI_LINUX_BINDERFS_H
+#define _UAPI_LINUX_BINDERFS_H
 
 #include <linux/android/binder.h>
 #include <linux/types.h>
@@ -22,8 +22,8 @@
  */
 struct binderfs_device {
 	char name[BINDERFS_MAX_NAME + 1];
-	__u8 major;
-	__u8 minor;
+	__u32 major;
+	__u32 minor;
 };
 
 /**
@@ -31,5 +31,5 @@ struct binderfs_device {
  */
 #define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device)
 
-#endif /* _UAPI_LINUX_BINDER_CTL_H */
+#endif /* _UAPI_LINUX_BINDERFS_H */
 
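For context, the ioctl is issued against binderfs's binder-control node (a hedged userspace sketch, not part of this diff; the mount point is an assumption):

	struct binderfs_device device = { 0 };
	int fd;

	strncpy(device.name, "my-binder", BINDERFS_MAX_NAME);
	fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
	if (fd >= 0 && ioctl(fd, BINDER_CTL_ADD, &device) == 0)
		printf("allocated %u:%u\n", device.major, device.minor);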
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 6fa38d001d84..498eec813494 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -138,6 +138,7 @@ struct blk_zone_range {
  * @BLKRESETZONE: Reset the write pointer of the zones in the specified
  *                sector range. The sector range must be zone aligned.
  * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
+ * @BLKGETNRZONES: Get the total number of zones of the device.
  */
 #define BLKREPORTZONE	_IOWR(0x12, 130, struct blk_zone_report)
 #define BLKRESETZONE	_IOW(0x12, 131, struct blk_zone_range)
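The new ioctl returns the count through its argument (a hedged userspace sketch, not part of this diff; the device path is an assumption):

	unsigned int nr_zones = 0;
	int fd = open("/dev/sdb", O_RDONLY);

	if (fd >= 0 && ioctl(fd, BLKGETNRZONES, &nr_zones) == 0)
		printf("%u zones\n", nr_zones);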
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index fb78f6f500f3..f056b2a00d5c 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -26,13 +26,17 @@
  */
 
 struct input_event {
-#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL)
+#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__)
 	struct timeval time;
 #define input_event_sec time.tv_sec
 #define input_event_usec time.tv_usec
 #else
 	__kernel_ulong_t __sec;
+#if defined(__sparc__) && defined(__arch64__)
+	unsigned int __usec;
+#else
 	__kernel_ulong_t __usec;
+#endif
 #define input_event_sec __sec
 #define input_event_usec __usec
 #endif
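Because the struct layout now varies by ABI, userspace should go through the accessor macros (a hedged sketch, not part of this diff; the event fd is an assumption):

	struct input_event ev;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("%lld.%06lld: type %u code %u value %d\n",
		       (long long)ev.input_event_sec,
		       (long long)ev.input_event_usec,
		       ev.type, ev.code, ev.value);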
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 1196e1c1d4f6..ff8e7dc9d4dd 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -79,6 +79,12 @@
 #define VIRTIO_F_RING_PACKED		34
 
 /*
+ * This feature indicates that memory accesses by the driver and the
+ * device are ordered in a way described by the platform.
+ */
+#define VIRTIO_F_ORDER_PLATFORM		36
+
+/*
  * Does the device support Single Root I/O Virtualization?
  */
 #define VIRTIO_F_SR_IOV			37
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 2414f8af26b3..4c4e24c291a5 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -213,14 +213,4 @@ struct vring_packed_desc {
 	__le16 flags;
 };
 
-struct vring_packed {
-	unsigned int num;
-
-	struct vring_packed_desc *desc;
-
-	struct vring_packed_desc_event *driver;
-
-	struct vring_packed_desc_event *device;
-};
-
 #endif /* _UAPI_LINUX_VIRTIO_RING_H */
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index ef3c7ec793a7..eb76b38a00d4 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -52,6 +52,11 @@ struct hns_roce_ib_create_srq {
 	__aligned_u64 que_addr;
 };
 
+struct hns_roce_ib_create_srq_resp {
+	__u32	srqn;
+	__u32	reserved;
+};
+
 struct hns_roce_ib_create_qp {
 	__aligned_u64 buf_addr;
 	__aligned_u64 db_addr;
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index 59a260712a56..2ca9164a79bf 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -1,17 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <asm/dma-mapping.h>
-#include <linux/dma-mapping.h>
-
-static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
-{
-	if (dev && dev->archdata.dev_dma_ops)
-		return dev->archdata.dev_dma_ops;
-	return get_arch_dma_ops(NULL);
-}
+#ifndef _XEN_ARM_PAGE_COHERENT_H
+#define _XEN_ARM_PAGE_COHERENT_H
 
 void __xen_dma_map_page(struct device *hwdev, struct page *page,
 	     dma_addr_t dev_addr, unsigned long offset, size_t size,
@@ -21,87 +10,7 @@ void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		unsigned long attrs);
 void __xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
 void __xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir);
 
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-	return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long page_pfn = page_to_xen_pfn(page);
-	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-	unsigned long compound_pages =
-		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-	bool local = (page_pfn <= dev_pfn) &&
-		(dev_pfn - page_pfn < compound_pages);
-
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can span across
-	 * multiple Xen pages, it's not possible for it to contain a
-	 * mix of local and foreign Xen pages. So if the first xen_pfn
-	 * == mfn the page is local otherwise it's a foreign page
-	 * grant-mapped in dom0. If the page is local we can safely
-	 * call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (local)
-		xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-	else
-		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-	 * multiple Xen page, it's not possible to have a mix of local and
-	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-	 * foreign mfn will always return false. If the page is local we can
-	 * safely call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->unmap_page)
-			xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-	} else
-		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
-			xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (xen_get_dma_ops(hwdev)->sync_single_for_device)
-			xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#endif /* _XEN_ARM_PAGE_COHERENT_H */
diff --git a/init/Kconfig b/init/Kconfig
index 513fa544a134..c9386a365eea 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -512,6 +512,17 @@ config PSI_DEFAULT_DISABLED
 	  per default but can be enabled through passing psi=1 on the
 	  kernel commandline during boot.
 
+	  This feature adds some code to the task wakeup and sleep
+	  paths of the scheduler. The overhead is too low to affect
+	  common scheduling-intense workloads in practice (such as
+	  webservers, memcache), but it does show up in artificial
+	  scheduler stress tests, such as hackbench.
+
+	  If you are paranoid and not sure what the kernel will be
+	  used for, say Y.
+
+	  Say N if unsure.
+
 endmenu # "CPU/Task time and stats accounting"
 
 config CPU_ISOLATION
@@ -825,7 +836,7 @@ config CGROUP_PIDS
 	  PIDs controller is designed to stop this from happening.
 
 	  It should be noted that organisational operations (such as attaching
-	  to a cgroup hierarchy will *not* be blocked by the PIDs controller),
+	  to a cgroup hierarchy) will *not* be blocked by the PIDs controller,
 	  since the PIDs limit only affects a process's ability to fork, not to
 	  attach to a cgroup.
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 91d5c38eb7e5..d1c6d152da89 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -376,9 +376,6 @@ void __weak arch_smt_update(void) { }
 
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
-EXPORT_SYMBOL_GPL(cpu_smt_control);
-
-static bool cpu_smt_available __read_mostly;
 
 void __init cpu_smt_disable(bool force)
 {
@@ -397,25 +394,11 @@ void __init cpu_smt_disable(bool force)
 
 /*
  * The decision whether SMT is supported can only be done after the full
- * CPU identification. Called from architecture code before non boot CPUs
- * are brought up.
- */
-void __init cpu_smt_check_topology_early(void)
-{
-	if (!topology_smt_supported())
-		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
-}
-
-/*
- * If SMT was disabled by BIOS, detect it here, after the CPUs have been
- * brought online. This ensures the smt/l1tf sysfs entries are consistent
- * with reality. cpu_smt_available is set to true during the bringup of non
- * boot CPUs when a SMT sibling is detected. Note, this may overwrite
- * cpu_smt_control's previous setting.
+ * CPU identification. Called from architecture code.
  */
 void __init cpu_smt_check_topology(void)
 {
-	if (!cpu_smt_available)
+	if (!topology_smt_supported())
 		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 }
 
@@ -428,18 +411,10 @@ early_param("nosmt", smt_cmdline_disable);
 
 static inline bool cpu_smt_allowed(unsigned int cpu)
 {
-	if (topology_is_primary_thread(cpu))
+	if (cpu_smt_control == CPU_SMT_ENABLED)
 		return true;
 
-	/*
-	 * If the CPU is not a 'primary' thread and the booted_once bit is
-	 * set then the processor has SMT support. Store this information
-	 * for the late check of SMT support in cpu_smt_check_topology().
-	 */
-	if (per_cpu(cpuhp_state, cpu).booted_once)
-		cpu_smt_available = true;
-
-	if (cpu_smt_control == CPU_SMT_ENABLED)
+	if (topology_is_primary_thread(cpu))
 		return true;
 
 	/*
@@ -2090,10 +2065,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 	 */
 		cpuhp_offline_cpu_device(cpu);
 	}
-	if (!ret) {
+	if (!ret)
 		cpu_smt_control = ctrlval;
-		arch_smt_update();
-	}
 	cpu_maps_update_done();
 	return ret;
 }
@@ -2104,7 +2077,6 @@ static int cpuhp_smt_enable(void)
 
 	cpu_maps_update_begin();
 	cpu_smt_control = CPU_SMT_ENABLED;
-	arch_smt_update();
 	for_each_present_cpu(cpu) {
 		/* Skip online CPUs and CPUs on offline nodes */
 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3cd13a30f732..e5ede6918050 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
 				void __user *buffer, size_t *lenp,
 				loff_t *ppos)
 {
-	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
-	if (ret || !write)
-		return ret;
-
+	int ret;
+	int perf_cpu = sysctl_perf_cpu_time_max_percent;
 	/*
 	 * If throttling is disabled don't allow the write:
 	 */
-	if (sysctl_perf_cpu_time_max_percent == 100 ||
-	    sysctl_perf_cpu_time_max_percent == 0)
+	if (write && (perf_cpu == 100 || perf_cpu == 0))
 		return -EINVAL;
 
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret || !write)
+		return ret;
+
 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
 	update_perf_cpu_limits();
diff --git a/kernel/exit.c b/kernel/exit.c
index 284f2fe9a293..2639a30a8aa5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -307,7 +307,7 @@ void rcuwait_wake_up(struct rcuwait *w)
 	 *        MB (A)	      MB (B)
 	 *    [L] cond		  [L] tsk
 	 */
-	smp_rmb(); /* (B) */
+	smp_mb(); /* (B) */
 
 	/*
 	 * Avoid using task_rcu_dereference() magic as long as we are careful,
@@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
 	return NULL;
 }
 
-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+						struct list_head *dead)
 	__releases(&tasklist_lock)
 	__acquires(&tasklist_lock)
 {
 	struct pid_namespace *pid_ns = task_active_pid_ns(father);
 	struct task_struct *reaper = pid_ns->child_reaper;
+	struct task_struct *p, *n;
 
 	if (likely(reaper != father))
 		return reaper;
@@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
 		panic("Attempted to kill init! exitcode=0x%08x\n",
 			father->signal->group_exit_code ?: father->exit_code);
 	}
+
+	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+		list_del_init(&p->ptrace_entry);
+		release_task(p);
+	}
+
 	zap_pid_ns_processes(pid_ns);
 	write_lock_irq(&tasklist_lock);
 
@@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father,
 	exit_ptrace(father, dead);
 
 	/* Can drop and reacquire tasklist_lock */
-	reaper = find_child_reaper(father);
+	reaper = find_child_reaper(father, dead);
 	if (list_empty(&father->children))
 		return;
 
diff --git a/kernel/futex.c b/kernel/futex.c
index be3bff2315ff..fdd312da0992 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1452,11 +1452,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
 	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
 		return;
 
-	/*
-	 * Queue the task for later wakeup for after we've released
-	 * the hb->lock. wake_q_add() grabs reference to p.
-	 */
-	wake_q_add(wake_q, p);
+	get_task_struct(p);
 	__unqueue_futex(q);
 	/*
 	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
@@ -1466,6 +1462,13 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
 	 * plist_del in __unqueue_futex().
 	 */
 	smp_store_release(&q->lock_ptr, NULL);
+
+	/*
+	 * Queue the task for later wakeup for after we've released
+	 * the hb->lock. wake_q_add() grabs reference to p.
+	 */
+	wake_q_add(wake_q, p);
+	put_task_struct(p);
 }
 
 /*
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 45b68b4ea48b..f18cd5aa33e8 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -9,7 +9,7 @@
 #include <linux/cpu.h>
 
 static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
-				int cpus_per_vec)
+				unsigned int cpus_per_vec)
 {
 	const struct cpumask *siblmsk;
 	int cpu, sibl;
@@ -95,15 +95,17 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
 }
 
 static int __irq_build_affinity_masks(const struct irq_affinity *affd,
-				int startvec, int numvecs, int firstvec,
+				      unsigned int startvec,
+				      unsigned int numvecs,
+				      unsigned int firstvec,
 				cpumask_var_t *node_to_cpumask,
 				const struct cpumask *cpu_mask,
 				struct cpumask *nmsk,
 				struct irq_affinity_desc *masks)
 {
-	int n, nodes, cpus_per_vec, extra_vecs, done = 0;
-	int last_affv = firstvec + numvecs;
-	int curvec = startvec;
+	unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0;
+	unsigned int last_affv = firstvec + numvecs;
+	unsigned int curvec = startvec;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 
 	if (!cpumask_weight(cpu_mask))
@@ -117,18 +119,16 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 	 */
 	if (numvecs <= nodes) {
 		for_each_node_mask(n, nodemsk) {
-			cpumask_or(&masks[curvec].mask,
-					&masks[curvec].mask,
-					node_to_cpumask[n]);
+			cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
+				   node_to_cpumask[n]);
 			if (++curvec == last_affv)
 				curvec = firstvec;
 		}
-		done = numvecs;
-		goto out;
+		return numvecs;
 	}
 
 	for_each_node_mask(n, nodemsk) {
-		int ncpus, v, vecs_to_assign, vecs_per_node;
+		unsigned int ncpus, v, vecs_to_assign, vecs_per_node;
 
 		/* Spread the vectors per node */
 		vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
@@ -163,8 +163,6 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 			curvec = firstvec;
 		--nodes;
 	}
-
-out:
 	return done;
 }
 
@@ -174,19 +172,24 @@ out:
  * 2) spread other possible CPUs on these vectors
 */
 static int irq_build_affinity_masks(const struct irq_affinity *affd,
-				int startvec, int numvecs, int firstvec,
-				cpumask_var_t *node_to_cpumask,
-				struct irq_affinity_desc *masks)
+				    unsigned int startvec, unsigned int numvecs,
+				    unsigned int firstvec,
+				    struct irq_affinity_desc *masks)
 {
-	int curvec = startvec, nr_present, nr_others;
-	int ret = -ENOMEM;
-	cpumask_var_t nmsk, npresmsk;
+	unsigned int curvec = startvec, nr_present, nr_others;
+	cpumask_var_t *node_to_cpumask;
+	cpumask_var_t nmsk, npresmsk;
+	int ret = -ENOMEM;
 
 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
 		return ret;
 
 	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
-		goto fail;
+		goto fail_nmsk;
+
+	node_to_cpumask = alloc_node_to_cpumask();
+	if (!node_to_cpumask)
+		goto fail_npresmsk;
 
 	ret = 0;
 	/* Stabilize the cpumasks */
@@ -217,13 +220,22 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
 	if (nr_present < numvecs)
 		WARN_ON(nr_present + nr_others < numvecs);
 
+	free_node_to_cpumask(node_to_cpumask);
+
+ fail_npresmsk:
 	free_cpumask_var(npresmsk);
 
- fail:
+ fail_nmsk:
 	free_cpumask_var(nmsk);
 	return ret;
 }
 
+static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
+{
+	affd->nr_sets = 1;
+	affd->set_size[0] = affvecs;
+}
+
 /**
  * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
  * @nvecs:	The total number of vectors
@@ -232,50 +244,62 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
 */
 struct irq_affinity_desc *
-irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
 {
-	int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
-	int curvec, usedvecs;
-	cpumask_var_t *node_to_cpumask;
+	unsigned int affvecs, curvec, usedvecs, i;
 	struct irq_affinity_desc *masks = NULL;
-	int i, nr_sets;
 
 	/*
-	 * If there aren't any vectors left after applying the pre/post
-	 * vectors don't bother with assigning affinity.
+	 * Determine the number of vectors which need interrupt affinities
+	 * assigned. If the pre/post request exhausts the available vectors
+	 * then nothing to do here except for invoking the calc_sets()
+	 * callback so the device driver can adjust to the situation. If there
+	 * is only a single vector, then managing the queue is pointless as
+	 * well.
	 */
-	if (nvecs == affd->pre_vectors + affd->post_vectors)
+	if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
+		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
+	else
+		affvecs = 0;
+
+	/*
+	 * Simple invocations do not provide a calc_sets() callback. Install
+	 * the generic one.
+	 */
+	if (!affd->calc_sets)
+		affd->calc_sets = default_calc_sets;
+
+	/* Recalculate the sets */
+	affd->calc_sets(affd, affvecs);
+
+	if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
 		return NULL;
 
-	node_to_cpumask = alloc_node_to_cpumask();
-	if (!node_to_cpumask)
+	/* Nothing to assign? */
+	if (!affvecs)
 		return NULL;
 
 	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
 	if (!masks)
-		goto outnodemsk;
+		return NULL;
 
 	/* Fill out vectors at the beginning that don't need affinity */
 	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
 		cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+
 	/*
 	 * Spread on present CPUs starting from affd->pre_vectors. If we
 	 * have multiple sets, build each sets affinity mask separately.
 	 */
-	nr_sets = affd->nr_sets;
-	if (!nr_sets)
-		nr_sets = 1;
-
-	for (i = 0, usedvecs = 0; i < nr_sets; i++) {
-		int this_vecs = affd->sets ? affd->sets[i] : affvecs;
+	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
+		unsigned int this_vecs = affd->set_size[i];
 		int ret;
 
 		ret = irq_build_affinity_masks(affd, curvec, this_vecs,
-					curvec, node_to_cpumask, masks);
+					       curvec, masks);
 		if (ret) {
 			kfree(masks);
-			masks = NULL;
-			goto outnodemsk;
+			return NULL;
 		}
 		curvec += this_vecs;
 		usedvecs += this_vecs;
@@ -293,8 +317,6 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
 		masks[i].is_managed = 1;
 
-outnodemsk:
-	free_node_to_cpumask(node_to_cpumask);
 	return masks;
 }
 
@@ -304,25 +326,22 @@ outnodemsk:
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 */
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+				       const struct irq_affinity *affd)
 {
-	int resv = affd->pre_vectors + affd->post_vectors;
-	int vecs = maxvec - resv;
-	int set_vecs;
+	unsigned int resv = affd->pre_vectors + affd->post_vectors;
+	unsigned int set_vecs;
 
 	if (resv > minvec)
 		return 0;
 
-	if (affd->nr_sets) {
-		int i;
-
-		for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
-			set_vecs += affd->sets[i];
+	if (affd->calc_sets) {
+		set_vecs = maxvec - resv;
 	} else {
 		get_online_cpus();
 		set_vecs = cpumask_weight(cpu_possible_mask);
 		put_online_cpus();
 	}
 
-	return resv + min(set_vecs, vecs);
+	return resv + min(set_vecs, maxvec - resv);
 }
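A driver-side sketch of the new calc_sets() interface (hedged, not part of this diff; the callback, set split and vector counts are hypothetical):

	/* Split the spreadable vectors into two queue sets, e.g. read/write. */
	static void my_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
	{
		affd->nr_sets = 2;
		affd->set_size[0] = affvecs / 2;
		affd->set_size[1] = affvecs - affvecs / 2;
	}

	static struct irq_affinity affd = {
		.pre_vectors	= 1,		/* one unmanaged admin vector */
		.calc_sets	= my_calc_sets,
	};

	nr = pci_alloc_irq_vectors_affinity(pdev, 2, nvecs,
					    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &affd);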
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c32d5f386f68..99b7dd6982a4 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -886,7 +886,11 @@ void handle_percpu_irq(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
-	kstat_incr_irqs_this_cpu(desc);
+	/*
+	 * PER CPU interrupts are not serialized. Do not touch
+	 * desc->tot_count.
+	 */
+	__kstat_incr_irqs_this_cpu(desc);
 
 	if (chip->irq_ack)
 		chip->irq_ack(&desc->irq_data);
@@ -915,7 +919,11 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
 	unsigned int irq = irq_desc_get_irq(desc);
 	irqreturn_t res;
 
-	kstat_incr_irqs_this_cpu(desc);
+	/*
+	 * PER CPU interrupts are not serialized. Do not touch
+	 * desc->tot_count.
+	 */
+	__kstat_incr_irqs_this_cpu(desc);
 
 	if (chip->irq_ack)
 		chip->irq_ack(&desc->irq_data);
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 59a04d2a66df..516c00a5e867 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -258,8 +258,6 @@ static int __init irq_debugfs_init(void)
 	int irq;
 
 	root_dir = debugfs_create_dir("irq", NULL);
-	if (!root_dir)
-		return -ENOMEM;
 
 	irq_domain_debugfs_init(root_dir);
 
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 38554bc35375..6df5ddfdb0f8 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -166,7 +166,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
 
 			__irq_wake_thread(desc, action);
 
-			/* Fall through to add to randomness */
+			/* Fall through - to add to randomness */
 		case IRQ_HANDLED:
 			*flags |= action->flags;
 			break;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 2a77cdd27ca9..70c3053bc1f6 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -244,12 +244,18 @@ static inline void irq_state_set_masked(struct irq_desc *desc)
 
 #undef __irqd_to_state
 
-static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
+static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
 {
 	__this_cpu_inc(*desc->kstat_irqs);
 	__this_cpu_inc(kstat.irqs_sum);
 }
 
+static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
+{
+	__kstat_incr_irqs_this_cpu(desc);
+	desc->tot_count++;
+}
+
 static inline int irq_desc_get_node(struct irq_desc *desc)
 {
 	return irq_common_data_get_node(&desc->irq_common_data);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a1d7a7d484e0..13539e12cd80 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -119,6 +119,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
 	desc->depth = 1;
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
+	desc->tot_count = 0;
 	desc->name = NULL;
 	desc->owner = owner;
 	for_each_possible_cpu(cpu)
@@ -457,7 +458,7 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
 
 	/* Validate affinity mask(s) */
 	if (affinity) {
-		for (i = 0; i < cnt; i++, i++) {
+		for (i = 0; i < cnt; i++) {
 			if (cpumask_empty(&affinity[i].mask))
 				return -EINVAL;
 		}
@@ -954,11 +955,15 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 unsigned int kstat_irqs(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	int cpu;
 	unsigned int sum = 0;
+	int cpu;
 
 	if (!desc || !desc->kstat_irqs)
 		return 0;
+	if (!irq_settings_is_per_cpu_devid(desc) &&
+	    !irq_settings_is_per_cpu(desc))
+		return desc->tot_count;
+
 	for_each_possible_cpu(cpu)
 		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
 	return sum;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 80818764643d..3bf9793d8825 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1763,8 +1763,6 @@ void __init irq_domain_debugfs_init(struct dentry *root)
 	struct irq_domain *d;
 
 	domain_dir = debugfs_create_dir("domains", root);
-	if (!domain_dir)
-		return;
 
 	debugfs_create_file("default", 0444, domain_dir, NULL,
 			    &irq_domain_debug_fops);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 5b570050b9b6..9ec34a2a6638 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -393,6 +393,9 @@ int irq_setup_affinity(struct irq_desc *desc)
 	}
 
 	cpumask_and(&mask, cpu_online_mask, set);
+	if (cpumask_empty(&mask))
+		cpumask_copy(&mask, cpu_online_mask);
+
 	if (node != NUMA_NO_NODE) {
 		const struct cpumask *nodemask = cpumask_of_node(node);
 
@@ -760,6 +763,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 	case IRQ_SET_MASK_OK_DONE:
 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 		irqd_set(&desc->irq_data, flags);
+		/* fall through */
 
 	case IRQ_SET_MASK_OK_NOCOPY:
 		flags = irqd_get_trigger_type(&desc->irq_data);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d771b5..65234c89d85b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -101,6 +101,12 @@ bool kthread_should_stop(void)
 }
 EXPORT_SYMBOL(kthread_should_stop);
 
+bool __kthread_should_park(struct task_struct *k)
+{
+	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
+}
+EXPORT_SYMBOL_GPL(__kthread_should_park);
+
 /**
  * kthread_should_park - should this kthread park now?
  *
@@ -114,7 +120,7 @@ EXPORT_SYMBOL(kthread_should_stop);
  */
 bool kthread_should_park(void)
 {
-	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
+	return __kthread_should_park(current);
 }
 EXPORT_SYMBOL_GPL(kthread_should_park);
 
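For reference, a typical thread function honours both stop and park requests (a canonical sketch, not part of this diff):

	static int my_thread_fn(void *data)
	{
		while (!kthread_should_stop()) {
			if (kthread_should_park()) {
				kthread_parkme();
				continue;
			}
			/* ... do one unit of work ... */
		}
		return 0;
	}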
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 09b180063ee1..50d9af615dc4 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -198,15 +198,22 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 		woken++;
 		tsk = waiter->task;
 
-		wake_q_add(wake_q, tsk);
+		get_task_struct(tsk);
 		list_del(&waiter->list);
 		/*
-		 * Ensure that the last operation is setting the reader
+		 * Ensure calling get_task_struct() before setting the reader
 		 * waiter to nil such that rwsem_down_read_failed() cannot
 		 * race with do_exit() by always holding a reference count
 		 * to the task to wakeup.
 		 */
 		smp_store_release(&waiter->task, NULL);
+		/*
+		 * Ensure issuing the wakeup (either by us or someone else)
+		 * after setting the reader waiter to nil.
+		 */
+		wake_q_add(wake_q, tsk);
+		/* wake_q_add() already take the task ref */
+		put_task_struct(tsk);
 	}
 
 	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a674c7db2f29..d8d76a65cfdd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -396,6 +396,18 @@ static bool set_nr_if_polling(struct task_struct *p)
396#endif 396#endif
397#endif 397#endif
398 398
399/**
400 * wake_q_add() - queue a wakeup for 'later' waking.
401 * @head: the wake_q_head to add @task to
402 * @task: the task to queue for 'later' wakeup
403 *
404 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
405 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
406 * instantly.
407 *
408 * This function must be used as-if it were wake_up_process(); IOW the task
409 * must be ready to be woken at this location.
410 */
399void wake_q_add(struct wake_q_head *head, struct task_struct *task) 411void wake_q_add(struct wake_q_head *head, struct task_struct *task)
400{ 412{
401 struct wake_q_node *node = &task->wake_q; 413 struct wake_q_node *node = &task->wake_q;
@@ -405,10 +417,11 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
405 * its already queued (either by us or someone else) and will get the 417 * its already queued (either by us or someone else) and will get the
406 * wakeup due to that. 418 * wakeup due to that.
407 * 419 *
408 * This cmpxchg() executes a full barrier, which pairs with the full 420 * In order to ensure that a pending wakeup will observe our pending
409 * barrier executed by the wakeup in wake_up_q(). 421 * state, even in the failed case, an explicit smp_mb() must be used.
410 */ 422 */
411 if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL)) 423 smp_mb__before_atomic();
424 if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
412 return; 425 return;
413 426
414 get_task_struct(task); 427 get_task_struct(task);
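
The point of this change is that the kernel's cmpxchg() only implies a full barrier when it succeeds; on failure no ordering is guaranteed, so a concurrent waker could miss the queuer's prior stores. A userspace C11 analogue of the fixed pattern looks roughly like this (try_queue() and the sentinel are illustrative, not kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define WAKE_Q_TAIL ((void *)1)	/* stand-in for the kernel's sentinel */

	static bool try_queue(_Atomic(void *) *next)
	{
		void *expected = NULL;

		/* Full fence up front, ~ smp_mb__before_atomic(): the ordering
		 * holds whether or not the CAS below succeeds.
		 */
		atomic_thread_fence(memory_order_seq_cst);
		return atomic_compare_exchange_strong_explicit(next, &expected,
							       WAKE_Q_TAIL,
							       memory_order_relaxed,
							       memory_order_relaxed);
	}
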
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 50aa2aba69bd..310d0637fe4b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5980,6 +5980,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
5980 5980
5981#ifdef CONFIG_SCHED_SMT 5981#ifdef CONFIG_SCHED_SMT
5982DEFINE_STATIC_KEY_FALSE(sched_smt_present); 5982DEFINE_STATIC_KEY_FALSE(sched_smt_present);
5983EXPORT_SYMBOL_GPL(sched_smt_present);
5983 5984
5984static inline void set_idle_cores(int cpu, int val) 5985static inline void set_idle_cores(int cpu, int val)
5985{ 5986{
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index fe24de3fbc93..c3484785b179 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -124,6 +124,7 @@
124 * sampling of the aggregate task states would be. 124 * sampling of the aggregate task states would be.
125 */ 125 */
126 126
127#include "../workqueue_internal.h"
127#include <linux/sched/loadavg.h> 128#include <linux/sched/loadavg.h>
128#include <linux/seq_file.h> 129#include <linux/seq_file.h>
129#include <linux/proc_fs.h> 130#include <linux/proc_fs.h>
@@ -480,9 +481,6 @@ static void psi_group_change(struct psi_group *group, int cpu,
480 groupc->tasks[t]++; 481 groupc->tasks[t]++;
481 482
482 write_seqcount_end(&groupc->seq); 483 write_seqcount_end(&groupc->seq);
483
484 if (!delayed_work_pending(&group->clock_work))
485 schedule_delayed_work(&group->clock_work, PSI_FREQ);
486} 484}
487 485
488static struct psi_group *iterate_groups(struct task_struct *task, void **iter) 486static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
@@ -513,6 +511,7 @@ void psi_task_change(struct task_struct *task, int clear, int set)
513{ 511{
514 int cpu = task_cpu(task); 512 int cpu = task_cpu(task);
515 struct psi_group *group; 513 struct psi_group *group;
514 bool wake_clock = true;
516 void *iter = NULL; 515 void *iter = NULL;
517 516
518 if (!task->pid) 517 if (!task->pid)
@@ -530,8 +529,22 @@ void psi_task_change(struct task_struct *task, int clear, int set)
530 task->psi_flags &= ~clear; 529 task->psi_flags &= ~clear;
531 task->psi_flags |= set; 530 task->psi_flags |= set;
532 531
533 while ((group = iterate_groups(task, &iter))) 532 /*
533 * Periodic aggregation shuts off if there is a period of no
534 * task changes, so we wake it back up if necessary. However,
535 * don't do this if the task change is the aggregation worker
536 * itself going to sleep, or we'll ping-pong forever.
537 */
538 if (unlikely((clear & TSK_RUNNING) &&
539 (task->flags & PF_WQ_WORKER) &&
540 wq_worker_last_func(task) == psi_update_work))
541 wake_clock = false;
542
543 while ((group = iterate_groups(task, &iter))) {
534 psi_group_change(group, cpu, clear, set); 544 psi_group_change(group, cpu, clear, set);
545 if (wake_clock && !delayed_work_pending(&group->clock_work))
546 schedule_delayed_work(&group->clock_work, PSI_FREQ);
547 }
535} 548}
536 549
537void psi_memstall_tick(struct task_struct *task, int cpu) 550void psi_memstall_tick(struct task_struct *task, int cpu)
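
Condensed into a predicate, the new bail-out condition above reads as follows; this is an illustrative refactor of the hunk, not code from the patch (TSK_RUNNING, PF_WQ_WORKER, wq_worker_last_func() and psi_update_work are the real symbols):

	/* True when the task going to sleep is the PSI aggregation worker
	 * itself, in which case rearming group->clock_work would ping-pong
	 * forever.
	 */
	static bool psi_aggregator_going_to_sleep(struct task_struct *task,
						  int clear)
	{
		return (clear & TSK_RUNNING) &&
		       (task->flags & PF_WQ_WORKER) &&
		       wq_worker_last_func(task) == psi_update_work;
	}
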
diff --git a/kernel/smp.c b/kernel/smp.c
index 163c451af42e..f4cf1b0bb3b8 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -584,8 +584,6 @@ void __init smp_init(void)
584 num_nodes, (num_nodes > 1 ? "s" : ""), 584 num_nodes, (num_nodes > 1 ? "s" : ""),
585 num_cpus, (num_cpus > 1 ? "s" : "")); 585 num_cpus, (num_cpus > 1 ? "s" : ""));
586 586
587 /* Final decision about SMT support */
588 cpu_smt_check_topology();
589 /* Any cleanup work */ 587 /* Any cleanup work */
590 smp_cpus_done(setup_max_cpus); 588 smp_cpus_done(setup_max_cpus);
591} 589}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d28813306b2c..10277429ed84 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -89,7 +89,8 @@ static bool ksoftirqd_running(unsigned long pending)
89 89
90 if (pending & SOFTIRQ_NOW_MASK) 90 if (pending & SOFTIRQ_NOW_MASK)
91 return false; 91 return false;
92 return tsk && (tsk->state == TASK_RUNNING); 92 return tsk && (tsk->state == TASK_RUNNING) &&
93 !__kthread_should_park(tsk);
93} 94}
94 95
95/* 96/*
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 8f0644af40be..80f955210861 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
685 * set up the signal and overrun bookkeeping. 685 * set up the signal and overrun bookkeeping.
686 */ 686 */
687 timer->it.cpu.incr = timespec64_to_ns(&new->it_interval); 687 timer->it.cpu.incr = timespec64_to_ns(&new->it_interval);
688 timer->it_interval = ns_to_ktime(timer->it.cpu.incr);
688 689
689 /* 690 /*
690 * This acts as a modification timestamp for the timer, 691 * This acts as a modification timestamp for the timer,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index e335576b9411..9bde07c06362 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -5,7 +5,7 @@
5 * Copyright (C) IBM Corporation, 2010-2012 5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> 6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */ 7 */
8#define pr_fmt(fmt) "trace_kprobe: " fmt 8#define pr_fmt(fmt) "trace_uprobe: " fmt
9 9
10#include <linux/ctype.h> 10#include <linux/ctype.h>
11#include <linux/module.h> 11#include <linux/module.h>
@@ -160,6 +160,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
160 if (ret >= 0) { 160 if (ret >= 0) {
161 if (ret == maxlen) 161 if (ret == maxlen)
162 dst[ret - 1] = '\0'; 162 dst[ret - 1] = '\0';
163 else
164 /*
165 * Include the terminating null byte. In this case it
166 * was copied by strncpy_from_user but not accounted
167 * for in ret.
168 */
169 ret++;
163 *(u32 *)dest = make_data_loc(ret, (void *)dst - base); 170 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
164 } 171 }
165 172
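
A worked example of the accounting fix, since the off-by-one is easy to miss: strncpy_from_user() returns the string length excluding the NUL byte it copied, so the recorded data length has to be bumped except in the truncation case (buffer contents below are hypothetical):

	/*
	 * user string "ab\0", maxlen 8:
	 *   strncpy_from_user() writes 'a', 'b', '\0' (3 bytes) but returns 2,
	 *   so record ret + 1 = 3 and the stored blob keeps its terminator.
	 * user string longer than maxlen:
	 *   ret == maxlen, dst[maxlen - 1] is forced to '\0' above, and ret is
	 *   recorded unchanged because the forced terminator is already counted.
	 */
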
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 392be4b252f6..fc5d23d752a5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -910,6 +910,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
910} 910}
911 911
912/** 912/**
913 * wq_worker_last_func - retrieve worker's last work function
914 *
915 * Determine the last function a worker executed. This is called from
916 * the scheduler to get a worker's last known identity.
917 *
918 * CONTEXT:
919 * spin_lock_irq(rq->lock)
920 *
921 * Return:
922 * The last work function %current executed as a worker, NULL if it
923 * hasn't executed any work yet.
924 */
925work_func_t wq_worker_last_func(struct task_struct *task)
926{
927 struct worker *worker = kthread_data(task);
928
929 return worker->last_func;
930}
931
932/**
913 * worker_set_flags - set worker flags and adjust nr_running accordingly 933 * worker_set_flags - set worker flags and adjust nr_running accordingly
914 * @worker: self 934 * @worker: self
915 * @flags: flags to set 935 * @flags: flags to set
@@ -2184,6 +2204,9 @@ __acquires(&pool->lock)
2184 if (unlikely(cpu_intensive)) 2204 if (unlikely(cpu_intensive))
2185 worker_clr_flags(worker, WORKER_CPU_INTENSIVE); 2205 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2186 2206
2207 /* tag the worker for identification in schedule() */
2208 worker->last_func = worker->current_func;
2209
2187 /* we're done with it, release */ 2210 /* we're done with it, release */
2188 hash_del(&worker->hentry); 2211 hash_del(&worker->hentry);
2189 worker->current_work = NULL; 2212 worker->current_work = NULL;
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 66fbb5a9e633..cb68b03ca89a 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -53,6 +53,9 @@ struct worker {
53 53
54 /* used only by rescuers to point to the target workqueue */ 54 /* used only by rescuers to point to the target workqueue */
55 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ 55 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
56
57 /* used by the scheduler to determine a worker's last known identity */
58 work_func_t last_func;
56}; 59};
57 60
58/** 61/**
@@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void)
67 70
68/* 71/*
69 * Scheduler hooks for concurrency managed workqueue. Only to be used from 72 * Scheduler hooks for concurrency managed workqueue. Only to be used from
70 * sched/core.c and workqueue.c. 73 * sched/ and workqueue.c.
71 */ 74 */
72void wq_worker_waking_up(struct task_struct *task, int cpu); 75void wq_worker_waking_up(struct task_struct *task, int cpu);
73struct task_struct *wq_worker_sleeping(struct task_struct *task); 76struct task_struct *wq_worker_sleeping(struct task_struct *task);
77work_func_t wq_worker_last_func(struct task_struct *task);
74 78
75#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ 79#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index d82d022111e0..9cf77628fc91 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
632 config->test_driver = NULL; 632 config->test_driver = NULL;
633 633
634 kfree_const(config->test_fs); 634 kfree_const(config->test_fs);
635 config->test_driver = NULL; 635 config->test_fs = NULL;
636} 636}
637 637
638static void kmod_config_free(struct kmod_test_device *test_dev) 638static void kmod_config_free(struct kmod_test_device *test_dev)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 4676c0a1eeca..c596a957f764 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -199,7 +199,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
199 XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); 199 XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
200 xa_set_mark(xa, index + 1, XA_MARK_0); 200 xa_set_mark(xa, index + 1, XA_MARK_0);
201 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); 201 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
202 xa_set_mark(xa, index + 2, XA_MARK_1); 202 xa_set_mark(xa, index + 2, XA_MARK_2);
203 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); 203 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
204 xa_store_order(xa, index, order, xa_mk_index(index), 204 xa_store_order(xa, index, order, xa_mk_index(index),
205 GFP_KERNEL); 205 GFP_KERNEL);
@@ -209,8 +209,8 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
209 void *entry; 209 void *entry;
210 210
211 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); 211 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
212 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1)); 212 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
213 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2)); 213 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
214 214
215 /* We should see two elements in the array */ 215 /* We should see two elements in the array */
216 rcu_read_lock(); 216 rcu_read_lock();
@@ -357,7 +357,7 @@ static noinline void check_cmpxchg(struct xarray *xa)
357static noinline void check_reserve(struct xarray *xa) 357static noinline void check_reserve(struct xarray *xa)
358{ 358{
359 void *entry; 359 void *entry;
360 unsigned long index = 0; 360 unsigned long index;
361 361
362 /* An array with a reserved entry is not empty */ 362 /* An array with a reserved entry is not empty */
363 XA_BUG_ON(xa, !xa_empty(xa)); 363 XA_BUG_ON(xa, !xa_empty(xa));
@@ -382,10 +382,12 @@ static noinline void check_reserve(struct xarray *xa)
382 xa_erase_index(xa, 12345678); 382 xa_erase_index(xa, 12345678);
383 XA_BUG_ON(xa, !xa_empty(xa)); 383 XA_BUG_ON(xa, !xa_empty(xa));
384 384
385 /* And so does xa_insert */ 385 /* But xa_insert does not */
386 xa_reserve(xa, 12345678, GFP_KERNEL); 386 xa_reserve(xa, 12345678, GFP_KERNEL);
387 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0); 387 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
388 xa_erase_index(xa, 12345678); 388 -EEXIST);
389 XA_BUG_ON(xa, xa_empty(xa));
390 XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
389 XA_BUG_ON(xa, !xa_empty(xa)); 391 XA_BUG_ON(xa, !xa_empty(xa));
390 392
391 /* Can iterate through a reserved entry */ 393 /* Can iterate through a reserved entry */
@@ -393,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa)
393 xa_reserve(xa, 6, GFP_KERNEL); 395 xa_reserve(xa, 6, GFP_KERNEL);
394 xa_store_index(xa, 7, GFP_KERNEL); 396 xa_store_index(xa, 7, GFP_KERNEL);
395 397
396 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { 398 xa_for_each(xa, index, entry) {
397 XA_BUG_ON(xa, index != 5 && index != 7); 399 XA_BUG_ON(xa, index != 5 && index != 7);
398 } 400 }
399 xa_destroy(xa); 401 xa_destroy(xa);
@@ -812,17 +814,16 @@ static noinline void check_find_1(struct xarray *xa)
812static noinline void check_find_2(struct xarray *xa) 814static noinline void check_find_2(struct xarray *xa)
813{ 815{
814 void *entry; 816 void *entry;
815 unsigned long i, j, index = 0; 817 unsigned long i, j, index;
816 818
817 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { 819 xa_for_each(xa, index, entry) {
818 XA_BUG_ON(xa, true); 820 XA_BUG_ON(xa, true);
819 } 821 }
820 822
821 for (i = 0; i < 1024; i++) { 823 for (i = 0; i < 1024; i++) {
822 xa_store_index(xa, index, GFP_KERNEL); 824 xa_store_index(xa, index, GFP_KERNEL);
823 j = 0; 825 j = 0;
824 index = 0; 826 xa_for_each(xa, index, entry) {
825 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
826 XA_BUG_ON(xa, xa_mk_index(index) != entry); 827 XA_BUG_ON(xa, xa_mk_index(index) != entry);
827 XA_BUG_ON(xa, index != j++); 828 XA_BUG_ON(xa, index != j++);
828 } 829 }
@@ -839,6 +840,7 @@ static noinline void check_find_3(struct xarray *xa)
839 840
840 for (i = 0; i < 100; i++) { 841 for (i = 0; i < 100; i++) {
841 for (j = 0; j < 100; j++) { 842 for (j = 0; j < 100; j++) {
843 rcu_read_lock();
842 for (k = 0; k < 100; k++) { 844 for (k = 0; k < 100; k++) {
843 xas_set(&xas, j); 845 xas_set(&xas, j);
844 xas_for_each_marked(&xas, entry, k, XA_MARK_0) 846 xas_for_each_marked(&xas, entry, k, XA_MARK_0)
@@ -847,6 +849,7 @@ static noinline void check_find_3(struct xarray *xa)
847 XA_BUG_ON(xa, 849 XA_BUG_ON(xa,
848 xas.xa_node != XAS_RESTART); 850 xas.xa_node != XAS_RESTART);
849 } 851 }
852 rcu_read_unlock();
850 } 853 }
851 xa_store_index(xa, i, GFP_KERNEL); 854 xa_store_index(xa, i, GFP_KERNEL);
852 xa_set_mark(xa, i, XA_MARK_0); 855 xa_set_mark(xa, i, XA_MARK_0);
@@ -1183,6 +1186,35 @@ static noinline void check_store_range(struct xarray *xa)
1183 } 1186 }
1184} 1187}
1185 1188
1189static void check_align_1(struct xarray *xa, char *name)
1190{
1191 int i;
1192 unsigned int id;
1193 unsigned long index;
1194 void *entry;
1195
1196 for (i = 0; i < 8; i++) {
1197 id = 0;
1198 XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL)
1199 != 0);
1200 XA_BUG_ON(xa, id != i);
1201 }
1202 xa_for_each(xa, index, entry)
1203 XA_BUG_ON(xa, xa_is_err(entry));
1204 xa_destroy(xa);
1205}
1206
1207static noinline void check_align(struct xarray *xa)
1208{
1209 char name[] = "Motorola 68000";
1210
1211 check_align_1(xa, name);
1212 check_align_1(xa, name + 1);
1213 check_align_1(xa, name + 2);
1214 check_align_1(xa, name + 3);
1215// check_align_2(xa, name);
1216}
1217
1186static LIST_HEAD(shadow_nodes); 1218static LIST_HEAD(shadow_nodes);
1187 1219
1188static void test_update_node(struct xa_node *node) 1220static void test_update_node(struct xa_node *node)
@@ -1332,6 +1364,7 @@ static int xarray_checks(void)
1332 check_create_range(&array); 1364 check_create_range(&array);
1333 check_store_range(&array); 1365 check_store_range(&array);
1334 check_store_iter(&array); 1366 check_store_iter(&array);
1367 check_align(&xa0);
1335 1368
1336 check_workingset(&array, 0); 1369 check_workingset(&array, 0);
1337 check_workingset(&array, 64); 1370 check_workingset(&array, 64);
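
Most of the churn in this test file tracks an xa_for_each() signature change: the max and filter arguments are gone, and the macro now takes only the array, an index variable, and an entry variable. A minimal sketch of iteration under the new form (process() is a hypothetical consumer):

	void walk(struct xarray *xa)
	{
		unsigned long index;
		void *entry;

		xa_for_each(xa, index, entry)
			process(index, entry);	/* hypothetical per-entry consumer */
	}
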
diff --git a/lib/xarray.c b/lib/xarray.c
index 5f3f9311de89..81c3171ddde9 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -232,6 +232,8 @@ void *xas_load(struct xa_state *xas)
232 if (xas->xa_shift > node->shift) 232 if (xas->xa_shift > node->shift)
233 break; 233 break;
234 entry = xas_descend(xas, node); 234 entry = xas_descend(xas, node);
235 if (node->shift == 0)
236 break;
235 } 237 }
236 return entry; 238 return entry;
237} 239}
@@ -506,7 +508,7 @@ static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
506 for (;;) { 508 for (;;) {
507 void *entry = xa_entry_locked(xas->xa, node, offset); 509 void *entry = xa_entry_locked(xas->xa, node, offset);
508 510
509 if (xa_is_node(entry)) { 511 if (node->shift && xa_is_node(entry)) {
510 node = xa_to_node(entry); 512 node = xa_to_node(entry);
511 offset = 0; 513 offset = 0;
512 continue; 514 continue;
@@ -604,6 +606,7 @@ static int xas_expand(struct xa_state *xas, void *head)
604/* 606/*
605 * xas_create() - Create a slot to store an entry in. 607 * xas_create() - Create a slot to store an entry in.
606 * @xas: XArray operation state. 608 * @xas: XArray operation state.
609 * @allow_root: %true if we can store the entry in the root directly
607 * 610 *
608 * Most users will not need to call this function directly, as it is called 611 * Most users will not need to call this function directly, as it is called
609 * by xas_store(). It is useful for doing conditional store operations 612 * by xas_store(). It is useful for doing conditional store operations
@@ -613,7 +616,7 @@ static int xas_expand(struct xa_state *xas, void *head)
613 * If the slot was newly created, returns %NULL. If it failed to create the 616 * If the slot was newly created, returns %NULL. If it failed to create the
614 * slot, returns %NULL and indicates the error in @xas. 617 * slot, returns %NULL and indicates the error in @xas.
615 */ 618 */
616static void *xas_create(struct xa_state *xas) 619static void *xas_create(struct xa_state *xas, bool allow_root)
617{ 620{
618 struct xarray *xa = xas->xa; 621 struct xarray *xa = xas->xa;
619 void *entry; 622 void *entry;
@@ -628,6 +631,8 @@ static void *xas_create(struct xa_state *xas)
628 shift = xas_expand(xas, entry); 631 shift = xas_expand(xas, entry);
629 if (shift < 0) 632 if (shift < 0)
630 return NULL; 633 return NULL;
634 if (!shift && !allow_root)
635 shift = XA_CHUNK_SHIFT;
631 entry = xa_head_locked(xa); 636 entry = xa_head_locked(xa);
632 slot = &xa->xa_head; 637 slot = &xa->xa_head;
633 } else if (xas_error(xas)) { 638 } else if (xas_error(xas)) {
@@ -687,7 +692,7 @@ void xas_create_range(struct xa_state *xas)
687 xas->xa_sibs = 0; 692 xas->xa_sibs = 0;
688 693
689 for (;;) { 694 for (;;) {
690 xas_create(xas); 695 xas_create(xas, true);
691 if (xas_error(xas)) 696 if (xas_error(xas))
692 goto restore; 697 goto restore;
693 if (xas->xa_index <= (index | XA_CHUNK_MASK)) 698 if (xas->xa_index <= (index | XA_CHUNK_MASK))
@@ -754,7 +759,7 @@ void *xas_store(struct xa_state *xas, void *entry)
754 bool value = xa_is_value(entry); 759 bool value = xa_is_value(entry);
755 760
756 if (entry) 761 if (entry)
757 first = xas_create(xas); 762 first = xas_create(xas, !xa_is_node(entry));
758 else 763 else
759 first = xas_load(xas); 764 first = xas_load(xas);
760 765
@@ -1251,35 +1256,6 @@ void *xas_find_conflict(struct xa_state *xas)
1251EXPORT_SYMBOL_GPL(xas_find_conflict); 1256EXPORT_SYMBOL_GPL(xas_find_conflict);
1252 1257
1253/** 1258/**
1254 * xa_init_flags() - Initialise an empty XArray with flags.
1255 * @xa: XArray.
1256 * @flags: XA_FLAG values.
1257 *
1258 * If you need to initialise an XArray with special flags (eg you need
1259 * to take the lock from interrupt context), use this function instead
1260 * of xa_init().
1261 *
1262 * Context: Any context.
1263 */
1264void xa_init_flags(struct xarray *xa, gfp_t flags)
1265{
1266 unsigned int lock_type;
1267 static struct lock_class_key xa_lock_irq;
1268 static struct lock_class_key xa_lock_bh;
1269
1270 spin_lock_init(&xa->xa_lock);
1271 xa->xa_flags = flags;
1272 xa->xa_head = NULL;
1273
1274 lock_type = xa_lock_type(xa);
1275 if (lock_type == XA_LOCK_IRQ)
1276 lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
1277 else if (lock_type == XA_LOCK_BH)
1278 lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
1279}
1280EXPORT_SYMBOL(xa_init_flags);
1281
1282/**
1283 * xa_load() - Load an entry from an XArray. 1259 * xa_load() - Load an entry from an XArray.
1284 * @xa: XArray. 1260 * @xa: XArray.
1285 * @index: index into array. 1261 * @index: index into array.
@@ -1308,7 +1284,6 @@ static void *xas_result(struct xa_state *xas, void *curr)
1308{ 1284{
1309 if (xa_is_zero(curr)) 1285 if (xa_is_zero(curr))
1310 return NULL; 1286 return NULL;
1311 XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
1312 if (xas_error(xas)) 1287 if (xas_error(xas))
1313 curr = xas->xa_node; 1288 curr = xas->xa_node;
1314 return curr; 1289 return curr;
@@ -1378,7 +1353,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1378 XA_STATE(xas, xa, index); 1353 XA_STATE(xas, xa, index);
1379 void *curr; 1354 void *curr;
1380 1355
1381 if (WARN_ON_ONCE(xa_is_internal(entry))) 1356 if (WARN_ON_ONCE(xa_is_advanced(entry)))
1382 return XA_ERROR(-EINVAL); 1357 return XA_ERROR(-EINVAL);
1383 if (xa_track_free(xa) && !entry) 1358 if (xa_track_free(xa) && !entry)
1384 entry = XA_ZERO_ENTRY; 1359 entry = XA_ZERO_ENTRY;
@@ -1444,7 +1419,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1444 XA_STATE(xas, xa, index); 1419 XA_STATE(xas, xa, index);
1445 void *curr; 1420 void *curr;
1446 1421
1447 if (WARN_ON_ONCE(xa_is_internal(entry))) 1422 if (WARN_ON_ONCE(xa_is_advanced(entry)))
1448 return XA_ERROR(-EINVAL); 1423 return XA_ERROR(-EINVAL);
1449 if (xa_track_free(xa) && !entry) 1424 if (xa_track_free(xa) && !entry)
1450 entry = XA_ZERO_ENTRY; 1425 entry = XA_ZERO_ENTRY;
@@ -1465,6 +1440,47 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1465EXPORT_SYMBOL(__xa_cmpxchg); 1440EXPORT_SYMBOL(__xa_cmpxchg);
1466 1441
1467/** 1442/**
1443 * __xa_insert() - Store this entry in the XArray if no entry is present.
1444 * @xa: XArray.
1445 * @index: Index into array.
1446 * @entry: New entry.
1447 * @gfp: Memory allocation flags.
1448 *
1449 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
1450 * if no entry is present. Inserting will fail if a reserved entry is
1451 * present, even though loading from this index will return NULL.
1452 *
1453 * Context: Any context. Expects xa_lock to be held on entry. May
1454 * release and reacquire xa_lock if @gfp flags permit.
1455 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
1456 * -ENOMEM if memory could not be allocated.
1457 */
1458int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1459{
1460 XA_STATE(xas, xa, index);
1461 void *curr;
1462
1463 if (WARN_ON_ONCE(xa_is_advanced(entry)))
1464 return -EINVAL;
1465 if (!entry)
1466 entry = XA_ZERO_ENTRY;
1467
1468 do {
1469 curr = xas_load(&xas);
1470 if (!curr) {
1471 xas_store(&xas, entry);
1472 if (xa_track_free(xa))
1473 xas_clear_mark(&xas, XA_FREE_MARK);
1474 } else {
1475 xas_set_err(&xas, -EEXIST);
1476 }
1477 } while (__xas_nomem(&xas, gfp));
1478
1479 return xas_error(&xas);
1480}
1481EXPORT_SYMBOL(__xa_insert);
1482
1483/**
1468 * __xa_reserve() - Reserve this index in the XArray. 1484 * __xa_reserve() - Reserve this index in the XArray.
1469 * @xa: XArray. 1485 * @xa: XArray.
1470 * @index: Index into array. 1486 * @index: Index into array.
@@ -1567,7 +1583,7 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
1567 if (last + 1) 1583 if (last + 1)
1568 order = __ffs(last + 1); 1584 order = __ffs(last + 1);
1569 xas_set_order(&xas, last, order); 1585 xas_set_order(&xas, last, order);
1570 xas_create(&xas); 1586 xas_create(&xas, true);
1571 if (xas_error(&xas)) 1587 if (xas_error(&xas))
1572 goto unlock; 1588 goto unlock;
1573 } 1589 }
@@ -1609,7 +1625,7 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
1609 XA_STATE(xas, xa, 0); 1625 XA_STATE(xas, xa, 0);
1610 int err; 1626 int err;
1611 1627
1612 if (WARN_ON_ONCE(xa_is_internal(entry))) 1628 if (WARN_ON_ONCE(xa_is_advanced(entry)))
1613 return -EINVAL; 1629 return -EINVAL;
1614 if (WARN_ON_ONCE(!xa_track_free(xa))) 1630 if (WARN_ON_ONCE(!xa_track_free(xa)))
1615 return -EINVAL; 1631 return -EINVAL;
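
The semantics the new __xa_insert() kernel-doc spells out can be counter-intuitive: a reserved slot loads as NULL yet still makes an insert fail. A short sketch of that behaviour (a fragment, assuming an initialised XArray xa and ignoring allocation failures):

	xa_reserve(&xa, 7, GFP_KERNEL);
	WARN_ON(xa_load(&xa, 7) != NULL);		/* reads back as NULL */
	WARN_ON(xa_insert(&xa, 7, xa_mk_value(1), GFP_KERNEL) != -EEXIST);
	WARN_ON(xa_erase(&xa, 7) != NULL);		/* releases the slot  */
	WARN_ON(!xa_empty(&xa));
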
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8a8bb8796c6c..72e6d0c55cfa 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -689,6 +689,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
689 INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); 689 INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
690 bdi->cgwb_congested_tree = RB_ROOT; 690 bdi->cgwb_congested_tree = RB_ROOT;
691 mutex_init(&bdi->cgwb_release_mutex); 691 mutex_init(&bdi->cgwb_release_mutex);
692 init_rwsem(&bdi->wb_switch_rwsem);
692 693
693 ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL); 694 ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
694 if (!ret) { 695 if (!ret) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index df2e7dd5ff17..afef61656c1e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4268,7 +4268,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4268 break; 4268 break;
4269 } 4269 }
4270 if (ret & VM_FAULT_RETRY) { 4270 if (ret & VM_FAULT_RETRY) {
4271 if (nonblocking) 4271 if (nonblocking &&
4272 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4272 *nonblocking = 0; 4273 *nonblocking = 0;
4273 *nr_pages = 0; 4274 *nr_pages = 0;
4274 /* 4275 /*
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 0a14fcff70ed..e2bb06c1b45e 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -5,6 +5,7 @@ UBSAN_SANITIZE_generic.o := n
5UBSAN_SANITIZE_tags.o := n 5UBSAN_SANITIZE_tags.o := n
6KCOV_INSTRUMENT := n 6KCOV_INSTRUMENT := n
7 7
8CFLAGS_REMOVE_common.o = -pg
8CFLAGS_REMOVE_generic.o = -pg 9CFLAGS_REMOVE_generic.o = -pg
9# Function splitter causes unnecessary splits in __asan_load1/__asan_store1 10# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
10# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 11# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7c72f2a95785..831be5ff5f4d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
372 if (fail || tk->addr_valid == 0) { 372 if (fail || tk->addr_valid == 0) {
373 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", 373 pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
374 pfn, tk->tsk->comm, tk->tsk->pid); 374 pfn, tk->tsk->comm, tk->tsk->pid);
375 force_sig(SIGKILL, tk->tsk); 375 do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
376 tk->tsk, PIDTYPE_PID);
376 } 377 }
377 378
378 /* 379 /*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b9a667d36c55..124e794867c5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1233,7 +1233,8 @@ static bool is_pageblock_removable_nolock(struct page *page)
1233bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) 1233bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1234{ 1234{
1235 struct page *page = pfn_to_page(start_pfn); 1235 struct page *page = pfn_to_page(start_pfn);
1236 struct page *end_page = page + nr_pages; 1236 unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
1237 struct page *end_page = pfn_to_page(end_pfn);
1237 1238
1238 /* Check the starting page of each pageblock within the range */ 1239 /* Check the starting page of each pageblock within the range */
1239 for (; page < end_page; page = next_active_pageblock(page)) { 1240 for (; page < end_page; page = next_active_pageblock(page)) {
@@ -1273,6 +1274,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1273 i++; 1274 i++;
1274 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) 1275 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
1275 continue; 1276 continue;
1277 /* Check if we got outside of the zone */
1278 if (zone && !zone_spans_pfn(zone, pfn + i))
1279 return 0;
1276 page = pfn_to_page(pfn + i); 1280 page = pfn_to_page(pfn + i);
1277 if (zone && page_zone(page) != zone) 1281 if (zone && page_zone(page) != zone)
1278 return 0; 1282 return 0;
@@ -1301,23 +1305,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1301static unsigned long scan_movable_pages(unsigned long start, unsigned long end) 1305static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
1302{ 1306{
1303 unsigned long pfn; 1307 unsigned long pfn;
1304 struct page *page; 1308
1305 for (pfn = start; pfn < end; pfn++) { 1309 for (pfn = start; pfn < end; pfn++) {
1306 if (pfn_valid(pfn)) { 1310 struct page *page, *head;
1307 page = pfn_to_page(pfn); 1311 unsigned long skip;
1308 if (PageLRU(page)) 1312
1309 return pfn; 1313 if (!pfn_valid(pfn))
1310 if (__PageMovable(page)) 1314 continue;
1311 return pfn; 1315 page = pfn_to_page(pfn);
1312 if (PageHuge(page)) { 1316 if (PageLRU(page))
1313 if (hugepage_migration_supported(page_hstate(page)) && 1317 return pfn;
1314 page_huge_active(page)) 1318 if (__PageMovable(page))
1315 return pfn; 1319 return pfn;
1316 else 1320
1317 pfn = round_up(pfn + 1, 1321 if (!PageHuge(page))
1318 1 << compound_order(page)) - 1; 1322 continue;
1319 } 1323 head = compound_head(page);
1320 } 1324 if (hugepage_migration_supported(page_hstate(head)) &&
1325 page_huge_active(head))
1326 return pfn;
1327 skip = (1 << compound_order(head)) - (page - head);
1328 pfn += skip - 1;
1321 } 1329 }
1322 return 0; 1330 return 0;
1323} 1331}
@@ -1344,7 +1352,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1344{ 1352{
1345 unsigned long pfn; 1353 unsigned long pfn;
1346 struct page *page; 1354 struct page *page;
1347 int not_managed = 0;
1348 int ret = 0; 1355 int ret = 0;
1349 LIST_HEAD(source); 1356 LIST_HEAD(source);
1350 1357
@@ -1392,7 +1399,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1392 else 1399 else
1393 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); 1400 ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1394 if (!ret) { /* Success */ 1401 if (!ret) { /* Success */
1395 put_page(page);
1396 list_add_tail(&page->lru, &source); 1402 list_add_tail(&page->lru, &source);
1397 if (!__PageMovable(page)) 1403 if (!__PageMovable(page))
1398 inc_node_page_state(page, NR_ISOLATED_ANON + 1404 inc_node_page_state(page, NR_ISOLATED_ANON +
@@ -1401,22 +1407,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1401 } else { 1407 } else {
1402 pr_warn("failed to isolate pfn %lx\n", pfn); 1408 pr_warn("failed to isolate pfn %lx\n", pfn);
1403 dump_page(page, "isolation failed"); 1409 dump_page(page, "isolation failed");
1404 put_page(page);
1405 /* Because we don't have big zone->lock. we should
1406 check this again here. */
1407 if (page_count(page)) {
1408 not_managed++;
1409 ret = -EBUSY;
1410 break;
1411 }
1412 } 1410 }
1411 put_page(page);
1413 } 1412 }
1414 if (!list_empty(&source)) { 1413 if (!list_empty(&source)) {
1415 if (not_managed) {
1416 putback_movable_pages(&source);
1417 goto out;
1418 }
1419
1420 /* Allocate a new page from the nearest neighbor node */ 1414 /* Allocate a new page from the nearest neighbor node */
1421 ret = migrate_pages(&source, new_node_page, NULL, 0, 1415 ret = migrate_pages(&source, new_node_page, NULL, 0,
1422 MIGRATE_SYNC, MR_MEMORY_HOTPLUG); 1416 MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
@@ -1429,7 +1423,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
1429 putback_movable_pages(&source); 1423 putback_movable_pages(&source);
1430 } 1424 }
1431 } 1425 }
1432out: 1426
1433 return ret; 1427 return ret;
1434} 1428}
1435 1429
@@ -1576,7 +1570,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
1576 we assume this for now. */ 1570 we assume this for now. */
1577 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, 1571 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
1578 &valid_end)) { 1572 &valid_end)) {
1579 mem_hotplug_done();
1580 ret = -EINVAL; 1573 ret = -EINVAL;
1581 reason = "multizone range"; 1574 reason = "multizone range";
1582 goto failed_removal; 1575 goto failed_removal;
@@ -1591,7 +1584,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
1591 MIGRATE_MOVABLE, 1584 MIGRATE_MOVABLE,
1592 SKIP_HWPOISON | REPORT_FAILURE); 1585 SKIP_HWPOISON | REPORT_FAILURE);
1593 if (ret) { 1586 if (ret) {
1594 mem_hotplug_done();
1595 reason = "failure to isolate range"; 1587 reason = "failure to isolate range";
1596 goto failed_removal; 1588 goto failed_removal;
1597 } 1589 }
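
The skip arithmetic in the rewritten scan_movable_pages() deserves a worked example, since the scan can enter a compound page anywhere, not just at its head:

	/*
	 * 2 MB hugepage (order 9, so 1 << 9 = 512 base pages), scan currently
	 * 3 pages past the head:
	 *   skip = 512 - (page - head) = 512 - 3 = 509
	 *   pfn += skip - 1 = pfn + 508, then the loop's pfn++ adds 1 more,
	 * leaving pfn exactly at head + 512, the first page after the
	 * hugepage, without re-testing any of its tail pages.
	 */
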
diff --git a/mm/migrate.c b/mm/migrate.c
index a16b15090df3..d4fd680be3b0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -709,7 +709,6 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
709 /* Simple case, sync compaction */ 709 /* Simple case, sync compaction */
710 if (mode != MIGRATE_ASYNC) { 710 if (mode != MIGRATE_ASYNC) {
711 do { 711 do {
712 get_bh(bh);
713 lock_buffer(bh); 712 lock_buffer(bh);
714 bh = bh->b_this_page; 713 bh = bh->b_this_page;
715 714
@@ -720,18 +719,15 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
720 719
721 /* async case, we cannot block on lock_buffer so use trylock_buffer */ 720 /* async case, we cannot block on lock_buffer so use trylock_buffer */
722 do { 721 do {
723 get_bh(bh);
724 if (!trylock_buffer(bh)) { 722 if (!trylock_buffer(bh)) {
725 /* 723 /*
726 * We failed to lock the buffer and cannot stall in 724 * We failed to lock the buffer and cannot stall in
727 * async migration. Release the taken locks 725 * async migration. Release the taken locks
728 */ 726 */
729 struct buffer_head *failed_bh = bh; 727 struct buffer_head *failed_bh = bh;
730 put_bh(failed_bh);
731 bh = head; 728 bh = head;
732 while (bh != failed_bh) { 729 while (bh != failed_bh) {
733 unlock_buffer(bh); 730 unlock_buffer(bh);
734 put_bh(bh);
735 bh = bh->b_this_page; 731 bh = bh->b_this_page;
736 } 732 }
737 return false; 733 return false;
@@ -818,7 +814,6 @@ unlock_buffers:
818 bh = head; 814 bh = head;
819 do { 815 do {
820 unlock_buffer(bh); 816 unlock_buffer(bh);
821 put_bh(bh);
822 bh = bh->b_this_page; 817 bh = bh->b_this_page;
823 818
824 } while (bh != head); 819 } while (bh != head);
@@ -1135,10 +1130,13 @@ out:
1135 * If migration is successful, decrease refcount of the newpage 1130 * If migration is successful, decrease refcount of the newpage
1136 * which will not free the page because new page owner increased 1131 * which will not free the page because new page owner increased
1137 * refcounter. As well, if it is LRU page, add the page to LRU 1132 * refcounter. As well, if it is LRU page, add the page to LRU
1138 * list in here. 1133 * list in here. Use the old state of the isolated source page to
1134 * determine if we migrated a LRU page. newpage was already unlocked
1135 * and possibly modified by its owner - don't rely on the page
1136 * state.
1139 */ 1137 */
1140 if (rc == MIGRATEPAGE_SUCCESS) { 1138 if (rc == MIGRATEPAGE_SUCCESS) {
1141 if (unlikely(__PageMovable(newpage))) 1139 if (unlikely(!is_lru))
1142 put_page(newpage); 1140 put_page(newpage);
1143 else 1141 else
1144 putback_lru_page(newpage); 1142 putback_lru_page(newpage);
diff --git a/mm/mincore.c b/mm/mincore.c
index f0f91461a9f4..218099b5ed31 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -42,14 +42,72 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
42 return 0; 42 return 0;
43} 43}
44 44
45static int mincore_unmapped_range(unsigned long addr, unsigned long end, 45/*
46 struct mm_walk *walk) 46 * Later we can get more picky about what "in core" means precisely.
47 * For now, simply check to see if the page is in the page cache,
48 * and is up to date; i.e. that no page-in operation would be required
49 * at this time if an application were to map and access this page.
50 */
51static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
52{
53 unsigned char present = 0;
54 struct page *page;
55
56 /*
57 * When tmpfs swaps out a page from a file, any process mapping that
58 * file will not get a swp_entry_t in its pte, but rather it is like
59 * any other file mapping (ie. marked !present and faulted in with
60 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
61 */
62#ifdef CONFIG_SWAP
63 if (shmem_mapping(mapping)) {
64 page = find_get_entry(mapping, pgoff);
65 /*
66 * shmem/tmpfs may return swap: account for swapcache
67 * page too.
68 */
69 if (xa_is_value(page)) {
70 swp_entry_t swp = radix_to_swp_entry(page);
71 page = find_get_page(swap_address_space(swp),
72 swp_offset(swp));
73 }
74 } else
75 page = find_get_page(mapping, pgoff);
76#else
77 page = find_get_page(mapping, pgoff);
78#endif
79 if (page) {
80 present = PageUptodate(page);
81 put_page(page);
82 }
83
84 return present;
85}
86
87static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
88 struct vm_area_struct *vma, unsigned char *vec)
47{ 89{
48 unsigned char *vec = walk->private;
49 unsigned long nr = (end - addr) >> PAGE_SHIFT; 90 unsigned long nr = (end - addr) >> PAGE_SHIFT;
91 int i;
50 92
51 memset(vec, 0, nr); 93 if (vma->vm_file) {
52 walk->private += nr; 94 pgoff_t pgoff;
95
96 pgoff = linear_page_index(vma, addr);
97 for (i = 0; i < nr; i++, pgoff++)
98 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
99 } else {
100 for (i = 0; i < nr; i++)
101 vec[i] = 0;
102 }
103 return nr;
104}
105
106static int mincore_unmapped_range(unsigned long addr, unsigned long end,
107 struct mm_walk *walk)
108{
109 walk->private += __mincore_unmapped_range(addr, end,
110 walk->vma, walk->private);
53 return 0; 111 return 0;
54} 112}
55 113
@@ -69,9 +127,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
69 goto out; 127 goto out;
70 } 128 }
71 129
72 /* We'll consider a THP page under construction to be there */
73 if (pmd_trans_unstable(pmd)) { 130 if (pmd_trans_unstable(pmd)) {
74 memset(vec, 1, nr); 131 __mincore_unmapped_range(addr, end, vma, vec);
75 goto out; 132 goto out;
76 } 133 }
77 134
@@ -80,17 +137,28 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
80 pte_t pte = *ptep; 137 pte_t pte = *ptep;
81 138
82 if (pte_none(pte)) 139 if (pte_none(pte))
83 *vec = 0; 140 __mincore_unmapped_range(addr, addr + PAGE_SIZE,
141 vma, vec);
84 else if (pte_present(pte)) 142 else if (pte_present(pte))
85 *vec = 1; 143 *vec = 1;
86 else { /* pte is a swap entry */ 144 else { /* pte is a swap entry */
87 swp_entry_t entry = pte_to_swp_entry(pte); 145 swp_entry_t entry = pte_to_swp_entry(pte);
88 146
89 /* 147 if (non_swap_entry(entry)) {
90 * migration or hwpoison entries are always 148 /*
91 * uptodate 149 * migration or hwpoison entries are always
92 */ 150 * uptodate
93 *vec = !!non_swap_entry(entry); 151 */
152 *vec = 1;
153 } else {
154#ifdef CONFIG_SWAP
155 *vec = mincore_page(swap_address_space(entry),
156 swp_offset(entry));
157#else
158 WARN_ON(1);
159 *vec = 1;
160#endif
161 }
94 } 162 }
95 vec++; 163 vec++;
96 } 164 }
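
For reference, the behaviour all of this feeds is visible from userspace: mincore(2) fills one byte per page, with bit 0 set when the page is resident and up to date. A small, runnable probe (illustrative, not part of the patch):

	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long psz = sysconf(_SC_PAGESIZE);
		size_t len = 4 * psz;
		unsigned char vec[4];
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		p[0] = 1;			/* fault in page 0 only */
		if (mincore(p, len, vec) != 0)
			return 1;
		for (int i = 0; i < 4; i++)
			printf("page %d: %sresident\n", i, (vec[i] & 1) ? "" : "not ");
		munmap(p, len);
		return 0;
	}
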
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f0e8cd9edb1a..26ea8636758f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -647,8 +647,8 @@ static int oom_reaper(void *unused)
647 647
648static void wake_oom_reaper(struct task_struct *tsk) 648static void wake_oom_reaper(struct task_struct *tsk)
649{ 649{
650 /* tsk is already queued? */ 650 /* mm is already queued? */
651 if (tsk == oom_reaper_list || tsk->oom_reaper_list) 651 if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
652 return; 652 return;
653 653
654 get_task_struct(tsk); 654 get_task_struct(tsk);
@@ -975,6 +975,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
975 * still freeing memory. 975 * still freeing memory.
976 */ 976 */
977 read_lock(&tasklist_lock); 977 read_lock(&tasklist_lock);
978
979 /*
980 * The task 'p' might have already exited before reaching here. The
981 * put_task_struct() there would free task_struct 'p' while the loop still
982 * tries to access its fields, so take an extra reference.
983 */
984 get_task_struct(p);
978 for_each_thread(p, t) { 985 for_each_thread(p, t) {
979 list_for_each_entry(child, &t->children, sibling) { 986 list_for_each_entry(child, &t->children, sibling) {
980 unsigned int child_points; 987 unsigned int child_points;
@@ -994,6 +1001,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
994 } 1001 }
995 } 1002 }
996 } 1003 }
1004 put_task_struct(p);
997 read_unlock(&tasklist_lock); 1005 read_unlock(&tasklist_lock);
998 1006
999 /* 1007 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d295c9bc01a8..35fdde041f5c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5701,18 +5701,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
5701 cond_resched(); 5701 cond_resched();
5702 } 5702 }
5703 } 5703 }
5704#ifdef CONFIG_SPARSEMEM
5705 /*
5706 * If the zone does not span the rest of the section then
5707 * we should at least initialize those pages. Otherwise we
5708 * could blow up on a poisoned page in some paths which depend
5709 * on full sections being initialized (e.g. memory hotplug).
5710 */
5711 while (end_pfn % PAGES_PER_SECTION) {
5712 __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
5713 end_pfn++;
5714 }
5715#endif
5716} 5704}
5717 5705
5718#ifdef CONFIG_ZONE_DEVICE 5706#ifdef CONFIG_ZONE_DEVICE
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 70417e9b932d..314bbc8010fb 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
114 dst = (ax25_address *)(bp + 1); 114 dst = (ax25_address *)(bp + 1);
115 src = (ax25_address *)(bp + 8); 115 src = (ax25_address *)(bp + 8);
116 116
117 ax25_route_lock_use();
117 route = ax25_get_route(dst, NULL); 118 route = ax25_get_route(dst, NULL);
118 if (route) { 119 if (route) {
119 digipeat = route->digipeat; 120 digipeat = route->digipeat;
@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
206 ax25_queue_xmit(skb, dev); 207 ax25_queue_xmit(skb, dev);
207 208
208put: 209put:
209 if (route)
210 ax25_put_route(route);
211 210
211 ax25_route_lock_unuse();
212 return NETDEV_TX_OK; 212 return NETDEV_TX_OK;
213} 213}
214 214
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a0eff323af12..66f74c85cf6b 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -40,7 +40,7 @@
40#include <linux/export.h> 40#include <linux/export.h>
41 41
42static ax25_route *ax25_route_list; 42static ax25_route *ax25_route_list;
43static DEFINE_RWLOCK(ax25_route_lock); 43DEFINE_RWLOCK(ax25_route_lock);
44 44
45void ax25_rt_device_down(struct net_device *dev) 45void ax25_rt_device_down(struct net_device *dev)
46{ 46{
@@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = {
335 * Find AX.25 route 335 * Find AX.25 route
336 * 336 *
337 * Only routes with a reference count of zero can be destroyed. 337 * Only routes with a reference count of zero can be destroyed.
338 * Must be called with ax25_route_lock read locked.
338 */ 339 */
339ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) 340ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
340{ 341{
@@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
342 ax25_route *ax25_def_rt = NULL; 343 ax25_route *ax25_def_rt = NULL;
343 ax25_route *ax25_rt; 344 ax25_route *ax25_rt;
344 345
345 read_lock(&ax25_route_lock);
346 /* 346 /*
347 * Bind to the physical interface we heard them on, or the default 347 * Bind to the physical interface we heard them on, or the default
348 * route if none is found; 348 * route if none is found;
@@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
365 if (ax25_spe_rt != NULL) 365 if (ax25_spe_rt != NULL)
366 ax25_rt = ax25_spe_rt; 366 ax25_rt = ax25_spe_rt;
367 367
368 if (ax25_rt != NULL)
369 ax25_hold_route(ax25_rt);
370
371 read_unlock(&ax25_route_lock);
372
373 return ax25_rt; 368 return ax25_rt;
374} 369}
375 370
@@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
400 ax25_route *ax25_rt; 395 ax25_route *ax25_rt;
401 int err = 0; 396 int err = 0;
402 397
403 if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) 398 ax25_route_lock_use();
399 ax25_rt = ax25_get_route(addr, NULL);
400 if (!ax25_rt) {
401 ax25_route_lock_unuse();
404 return -EHOSTUNREACH; 402 return -EHOSTUNREACH;
405 403 }
406 if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { 404 if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
407 err = -EHOSTUNREACH; 405 err = -EHOSTUNREACH;
408 goto put; 406 goto put;
@@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
437 } 435 }
438 436
439put: 437put:
440 ax25_put_route(ax25_rt); 438 ax25_route_lock_unuse();
441
442 return err; 439 return err;
443} 440}
444 441
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5e55cef0cec3..6693e209efe8 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2293,9 +2293,12 @@ static int compat_do_replace(struct net *net, void __user *user,
2293 2293
2294 xt_compat_lock(NFPROTO_BRIDGE); 2294 xt_compat_lock(NFPROTO_BRIDGE);
2295 2295
2296 ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); 2296 if (tmp.nentries) {
2297 if (ret < 0) 2297 ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2298 goto out_unlock; 2298 if (ret < 0)
2299 goto out_unlock;
2300 }
2301
2299 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2302 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2300 if (ret < 0) 2303 if (ret < 0)
2301 goto out_unlock; 2304 goto out_unlock;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 0af8f0db892a..79bb8afa9c0c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -67,6 +67,9 @@
67 */ 67 */
68#define MAX_NFRAMES 256 68#define MAX_NFRAMES 256
69 69
70/* limit timers to 400 days for sending/timeouts */
71#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
72
70/* use of last_frames[index].flags */ 73/* use of last_frames[index].flags */
71#define RX_RECV 0x40 /* received data for this element */ 74#define RX_RECV 0x40 /* received data for this element */
72#define RX_THR 0x80 /* element not been sent due to throttle feature */ 75#define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
140 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); 143 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
141} 144}
142 145
146/* check limitations for timeval provided by user */
147static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
148{
149 if ((msg_head->ival1.tv_sec < 0) ||
150 (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
151 (msg_head->ival1.tv_usec < 0) ||
152 (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
153 (msg_head->ival2.tv_sec < 0) ||
154 (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
155 (msg_head->ival2.tv_usec < 0) ||
156 (msg_head->ival2.tv_usec >= USEC_PER_SEC))
157 return true;
158
159 return false;
160}
161
143#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU) 162#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
144#define OPSIZ sizeof(struct bcm_op) 163#define OPSIZ sizeof(struct bcm_op)
145#define MHSIZ sizeof(struct bcm_msg_head) 164#define MHSIZ sizeof(struct bcm_msg_head)
@@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
873 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) 892 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
874 return -EINVAL; 893 return -EINVAL;
875 894
895 /* check timeval limitations */
896 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
897 return -EINVAL;
898
876 /* check the given can_id */ 899 /* check the given can_id */
877 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); 900 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
878 if (op) { 901 if (op) {
@@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1053 (!(msg_head->can_id & CAN_RTR_FLAG)))) 1076 (!(msg_head->can_id & CAN_RTR_FLAG))))
1054 return -EINVAL; 1077 return -EINVAL;
1055 1078
1079 /* check timeval limitations */
1080 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1081 return -EINVAL;
1082
1056 /* check the given can_id */ 1083 /* check the given can_id */
1057 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); 1084 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1058 if (op) { 1085 if (op) {
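
The 400-day cap is not arbitrary: it keeps the second-to-nanosecond conversion in bcm_timeval_to_ktime() far away from s64 overflow. The margin, worked out:

	/*
	 * BCM_TIMER_SEC_MAX = 400 * 24 * 60 * 60 = 34,560,000 s
	 * as nanoseconds:     34,560,000 * 10^9  ~= 3.46 * 10^16 ns
	 * ktime_t maximum:    S64_MAX            ~= 9.22 * 10^18 ns
	 * so the largest accepted interval stays more than two orders of
	 * magnitude below overflow, and bcm_is_invalid_tv() additionally
	 * rejects negative fields and tv_usec >= USEC_PER_SEC.
	 */
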
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index d5718284db57..3661cdd927f1 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -3206,9 +3206,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
3206 dout("con_keepalive %p\n", con); 3206 dout("con_keepalive %p\n", con);
3207 mutex_lock(&con->mutex); 3207 mutex_lock(&con->mutex);
3208 clear_standby(con); 3208 clear_standby(con);
3209 con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
3209 mutex_unlock(&con->mutex); 3210 mutex_unlock(&con->mutex);
3210 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && 3211
3211 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3212 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3212 queue_con(con); 3213 queue_con(con);
3213} 3214}
3214EXPORT_SYMBOL(ceph_con_keepalive); 3215EXPORT_SYMBOL(ceph_con_keepalive);
diff --git a/net/core/dev.c b/net/core/dev.c
index 82f20022259d..8e276e0192a1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8712,6 +8712,9 @@ int init_dummy_netdev(struct net_device *dev)
8712 set_bit(__LINK_STATE_PRESENT, &dev->state); 8712 set_bit(__LINK_STATE_PRESENT, &dev->state);
8713 set_bit(__LINK_STATE_START, &dev->state); 8713 set_bit(__LINK_STATE_START, &dev->state);
8714 8714
8715 /* napi_busy_loop stats accounting wants this */
8716 dev_net_set(dev, &init_net);
8717
8715 /* Note : We dont allocate pcpu_refcnt for dummy devices, 8718 /* Note : We dont allocate pcpu_refcnt for dummy devices,
8716 * because users of this 'device' dont need to change 8719 * because users of this 'device' dont need to change
8717 * its refcount. 8720 * its refcount.
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index d0b3e69c6b39..0962f9201baa 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -56,7 +56,7 @@
56#include <net/dn_neigh.h> 56#include <net/dn_neigh.h>
57#include <net/dn_fib.h> 57#include <net/dn_fib.h>
58 58
59#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn)) 59#define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn))
60 60
61static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; 61static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
62static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00}; 62static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
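
The DN_IFREQ_SIZE change is subtler than it looks: struct ifreq's ifr_ifru union also contains struct ifmap (24 bytes on 64-bit), so sizeof(struct ifreq) - sizeof(struct sockaddr) overshoots the name-plus-payload length by 8 there, while offsetof() pins the header length exactly. A runnable userspace illustration (the 16-byte payload is a stand-in, not the real struct sockaddr_dn):

	#include <net/if.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <sys/socket.h>

	struct payload { unsigned char bytes[16]; };	/* stand-in payload */

	int main(void)
	{
		size_t old_sz = sizeof(struct ifreq) - sizeof(struct sockaddr)
				+ sizeof(struct payload);
		size_t new_sz = offsetof(struct ifreq, ifr_ifru)
				+ sizeof(struct payload);

		/* On 64-bit glibc: old=40, new=32 - the sizeof-based formula
		 * reaches 8 bytes past what the caller actually supplies.
		 */
		printf("old=%zu new=%zu\n", old_sz, new_sz);
		return 0;
	}
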
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index a4bf22ee3aed..7c4a41dc04bb 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -25,6 +25,7 @@
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <net/protocol.h> 26#include <net/protocol.h>
27#include <net/gre.h> 27#include <net/gre.h>
28#include <net/erspan.h>
28 29
29#include <net/icmp.h> 30#include <net/icmp.h>
30#include <net/route.h> 31#include <net/route.h>
@@ -119,6 +120,22 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
119 hdr_len += 4; 120 hdr_len += 4;
120 } 121 }
121 tpi->hdr_len = hdr_len; 122 tpi->hdr_len = hdr_len;
123
124 /* ERSPAN versions 1 and 2 set the GRE key field
125 * to 0 and carry the configured key in the
126 * inner ERSPAN header field instead
127 */
128 if (greh->protocol == htons(ETH_P_ERSPAN) ||
129 greh->protocol == htons(ETH_P_ERSPAN2)) {
130 struct erspan_base_hdr *ershdr;
131
132 if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr)))
133 return -EINVAL;
134
135 ershdr = (struct erspan_base_hdr *)options;
136 tpi->key = cpu_to_be32(get_session_id(ershdr));
137 }
138
122 return hdr_len; 139 return hdr_len;
123} 140}
124EXPORT_SYMBOL(gre_parse_header); 141EXPORT_SYMBOL(gre_parse_header);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b1a74d80d868..20a64fe6254b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -268,20 +268,11 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
268 int len; 268 int len;
269 269
270 itn = net_generic(net, erspan_net_id); 270 itn = net_generic(net, erspan_net_id);
271 len = gre_hdr_len + sizeof(*ershdr);
272
273 /* Check based hdr len */
274 if (unlikely(!pskb_may_pull(skb, len)))
275 return PACKET_REJECT;
276 271
277 iph = ip_hdr(skb); 272 iph = ip_hdr(skb);
278 ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); 273 ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
279 ver = ershdr->ver; 274 ver = ershdr->ver;
280 275
281 /* The original GRE header does not have key field,
282 * Use ERSPAN 10-bit session ID as key.
283 */
284 tpi->key = cpu_to_be32(get_session_id(ershdr));
285 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, 276 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
286 tpi->flags | TUNNEL_KEY, 277 tpi->flags | TUNNEL_KEY,
287 iph->saddr, iph->daddr, tpi->key); 278 iph->saddr, iph->daddr, tpi->key);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c4f5602308ed..054d01c16dc6 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -644,13 +644,19 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
644 dst = tnl_params->daddr; 644 dst = tnl_params->daddr;
645 if (dst == 0) { 645 if (dst == 0) {
646 /* NBMA tunnel */ 646 /* NBMA tunnel */
647 struct ip_tunnel_info *tun_info;
647 648
648 if (!skb_dst(skb)) { 649 if (!skb_dst(skb)) {
649 dev->stats.tx_fifo_errors++; 650 dev->stats.tx_fifo_errors++;
650 goto tx_error; 651 goto tx_error;
651 } 652 }
652 653
653 if (skb->protocol == htons(ETH_P_IP)) { 654 tun_info = skb_tunnel_info(skb);
655 if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX) &&
656 ip_tunnel_info_af(tun_info) == AF_INET &&
657 tun_info->key.u.ipv4.dst)
658 dst = tun_info->key.u.ipv4.dst;
659 else if (skb->protocol == htons(ETH_P_IP)) {
654 rt = skb_rtable(skb); 660 rt = skb_rtable(skb);
655 dst = rt_nexthop(rt, inner_iph->daddr); 661 dst = rt_nexthop(rt, inner_iph->daddr);
656 } 662 }
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index d7b43e700023..68a21bf75dd0 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -74,6 +74,33 @@ drop:
74 return 0; 74 return 0;
75} 75}
76 76
77static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
78 int encap_type)
79{
80 struct ip_tunnel *tunnel;
81 const struct iphdr *iph = ip_hdr(skb);
82 struct net *net = dev_net(skb->dev);
83 struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
84
85 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
86 iph->saddr, iph->daddr, 0);
87 if (tunnel) {
88 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
89 goto drop;
90
91 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
92
93 skb->dev = tunnel->dev;
94
95 return xfrm_input(skb, nexthdr, spi, encap_type);
96 }
97
98 return -EINVAL;
99drop:
100 kfree_skb(skb);
101 return 0;
102}
103
77static int vti_rcv(struct sk_buff *skb) 104static int vti_rcv(struct sk_buff *skb)
78{ 105{
79 XFRM_SPI_SKB_CB(skb)->family = AF_INET; 106 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
82 return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); 109 return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
83} 110}
84 111
112static int vti_rcv_ipip(struct sk_buff *skb)
113{
114 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
115 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
116
117 return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
118}
119
85static int vti_rcv_cb(struct sk_buff *skb, int err) 120static int vti_rcv_cb(struct sk_buff *skb, int err)
86{ 121{
87 unsigned short family; 122 unsigned short family;
@@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
435 .priority = 100, 470 .priority = 100,
436}; 471};
437 472
473static struct xfrm_tunnel ipip_handler __read_mostly = {
474 .handler = vti_rcv_ipip,
475 .err_handler = vti4_err,
476 .priority = 0,
477};
478
438static int __net_init vti_init_net(struct net *net) 479static int __net_init vti_init_net(struct net *net)
439{ 480{
440 int err; 481 int err;
@@ -603,6 +644,13 @@ static int __init vti_init(void)
603 if (err < 0) 644 if (err < 0)
604 goto xfrm_proto_comp_failed; 645 goto xfrm_proto_comp_failed;
605 646
647 msg = "ipip tunnel";
648 err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
649 if (err < 0) {
 650 pr_info("%s: can't register tunnel\n", __func__);
651 goto xfrm_tunnel_failed;
652 }
653
606 msg = "netlink interface"; 654 msg = "netlink interface";
607 err = rtnl_link_register(&vti_link_ops); 655 err = rtnl_link_register(&vti_link_ops);
608 if (err < 0) 656 if (err < 0)
@@ -612,6 +660,8 @@ static int __init vti_init(void)
612 660
613rtnl_link_failed: 661rtnl_link_failed:
614 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); 662 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
663xfrm_tunnel_failed:
664 xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
615xfrm_proto_comp_failed: 665xfrm_proto_comp_failed:
616 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); 666 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
617xfrm_proto_ah_failed: 667xfrm_proto_ah_failed:
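
vti_init() follows the usual kernel idiom of registering each handler in order and unwinding in reverse on failure; note how the new xfrm_tunnel_failed label deregisters the ipip handler before falling through to the older labels. A compilable userspace sketch of the same unwind shape, with stand-in functions:

    #include <stdio.h>

    static int  register_a(void) { return 0; }   /* e.g. a protocol register */
    static int  register_b(void) { return -1; }  /* e.g. a tunnel register; fails here */
    static void unregister_a(void) { puts("undo a"); }

    static int init_all(void)
    {
        int err;

        err = register_a();
        if (err < 0)
            goto err_a;

        err = register_b();
        if (err < 0)
            goto err_b;

        return 0;

    err_b:
        unregister_a();   /* unwind in reverse order of registration */
    err_a:
        return err;
    }

    int main(void) { return init_all() ? 1 : 0; }
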
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index b61977db9b7f..2a909e5f9ba0 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -846,9 +846,9 @@ static int clusterip_net_init(struct net *net)
846 846
847static void clusterip_net_exit(struct net *net) 847static void clusterip_net_exit(struct net *net)
848{ 848{
849#ifdef CONFIG_PROC_FS
849 struct clusterip_net *cn = clusterip_pernet(net); 850 struct clusterip_net *cn = clusterip_pernet(net);
850 851
851#ifdef CONFIG_PROC_FS
852 mutex_lock(&cn->mutex); 852 mutex_lock(&cn->mutex);
853 proc_remove(cn->procdir); 853 proc_remove(cn->procdir);
854 cn->procdir = NULL; 854 cn->procdir = NULL;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 93d5ad2b1a69..84c358804355 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3495,8 +3495,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3495 3495
3496 if (!addrconf_link_ready(dev)) { 3496 if (!addrconf_link_ready(dev)) {
3497 /* device is not ready yet. */ 3497 /* device is not ready yet. */
3498 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", 3498 pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3499 dev->name); 3499 dev->name);
3500 break; 3500 break;
3501 } 3501 }
3502 3502
@@ -5120,6 +5120,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5120 if (idev) { 5120 if (idev) {
5121 err = in6_dump_addrs(idev, skb, cb, s_ip_idx, 5121 err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
5122 &fillargs); 5122 &fillargs);
5123 if (err > 0)
5124 err = 0;
5123 } 5125 }
5124 goto put_tgt_net; 5126 goto put_tgt_net;
5125 } 5127 }
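
The two added lines squash positive returns from in6_dump_addrs(): netlink dump callbacks return a positive count to mean "more to come, call me again", which is wrong for a single-interface dump that is already complete. A sketch of the convention, names hypothetical:

    /* Hypothetical dump step: <0 is a real error, >0 means "partial
     * fill, call me again", 0 means done. A single-target dump must
     * map the positive case to 0 so the netlink core stops iterating. */
    static int dump_single_target(int fill_result)
    {
        int err = fill_result;

        if (err > 0)
            err = 0;   /* one device requested and dumped: we are done */
        return err;
    }
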
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index b1be67ca6768..4416368dbd49 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -534,13 +534,9 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
534 struct ip6_tnl *tunnel; 534 struct ip6_tnl *tunnel;
535 u8 ver; 535 u8 ver;
536 536
537 if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
538 return PACKET_REJECT;
539
540 ipv6h = ipv6_hdr(skb); 537 ipv6h = ipv6_hdr(skb);
541 ershdr = (struct erspan_base_hdr *)skb->data; 538 ershdr = (struct erspan_base_hdr *)skb->data;
542 ver = ershdr->ver; 539 ver = ershdr->ver;
543 tpi->key = cpu_to_be32(get_session_id(ershdr));
544 540
545 tunnel = ip6gre_tunnel_lookup(skb->dev, 541 tunnel = ip6gre_tunnel_lookup(skb->dev,
546 &ipv6h->saddr, &ipv6h->daddr, tpi->key, 542 &ipv6h->saddr, &ipv6h->daddr, tpi->key,
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 30337b38274b..cc01aa3f2b5e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1516,6 +1516,9 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
1516 continue; 1516 continue;
1517 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); 1517 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
1518 list_del_rcu(&c->list); 1518 list_del_rcu(&c->list);
1519 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1520 FIB_EVENT_ENTRY_DEL,
1521 (struct mfc6_cache *)c, mrt->id);
1519 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); 1522 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
1520 mr_cache_put(c); 1523 mr_cache_put(c);
1521 } 1524 }
@@ -1524,10 +1527,6 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
1524 spin_lock_bh(&mfc_unres_lock); 1527 spin_lock_bh(&mfc_unres_lock);
1525 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { 1528 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1526 list_del(&c->list); 1529 list_del(&c->list);
1527 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1528 FIB_EVENT_ENTRY_DEL,
1529 (struct mfc6_cache *)c,
1530 mrt->id);
1531 mr6_netlink_event(mrt, (struct mfc6_cache *)c, 1530 mr6_netlink_event(mrt, (struct mfc6_cache *)c,
1532 RTM_DELROUTE); 1531 RTM_DELROUTE);
1533 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); 1532 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index de65fe3ed9cc..2493c74c2d37 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1490,6 +1490,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1490 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) 1490 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
1491 sta->sta.tdls = true; 1491 sta->sta.tdls = true;
1492 1492
1493 if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
1494 !sdata->u.mgd.associated)
1495 return -EINVAL;
1496
1493 err = sta_apply_parameters(local, sta, params); 1497 err = sta_apply_parameters(local, sta, params);
1494 if (err) { 1498 if (err) {
1495 sta_info_free(local, sta); 1499 sta_info_free(local, sta);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 45aad3d3108c..bb4d71efb6fb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -231,7 +231,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
231 struct ieee80211_hdr_3addr hdr; 231 struct ieee80211_hdr_3addr hdr;
232 u8 category; 232 u8 category;
233 u8 action_code; 233 u8 action_code;
234 } __packed action; 234 } __packed __aligned(2) action;
235 235
236 if (!sdata) 236 if (!sdata)
237 return; 237 return;
@@ -2723,7 +2723,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2723 skb_set_queue_mapping(skb, q); 2723 skb_set_queue_mapping(skb, q);
2724 2724
2725 if (!--mesh_hdr->ttl) { 2725 if (!--mesh_hdr->ttl) {
2726 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 2726 if (!is_multicast_ether_addr(hdr->addr1))
2727 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
2728 dropped_frames_ttl);
2727 goto out; 2729 goto out;
2728 } 2730 }
2729 2731
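
The first hunk adds __aligned(2) to an on-stack __packed struct: packing removes the natural alignment guarantee, while the 802.11 header helpers load 16-bit fields directly and assume at least 2-byte alignment. A minimal illustration:

    #include <stdint.h>

    /* A packed struct may start at any byte; adding aligned(2) restores
     * the alignment that direct 16-bit loads on the header expect. */
    struct action_hdr {
        uint16_t frame_control;
        uint8_t  addr1[6];
        uint8_t  category;
    } __attribute__((packed, aligned(2)));
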
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 432141f04af3..7d6318664eb2 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2221,6 +2221,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
2221 u->udp_timeout); 2221 u->udp_timeout);
2222 2222
2223#ifdef CONFIG_IP_VS_PROTO_TCP 2223#ifdef CONFIG_IP_VS_PROTO_TCP
2224 if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
2225 u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
2226 return -EINVAL;
2227 }
2228#endif
2229
2230#ifdef CONFIG_IP_VS_PROTO_UDP
2231 if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
2232 return -EINVAL;
2233#endif
2234
2235#ifdef CONFIG_IP_VS_PROTO_TCP
2224 if (u->tcp_timeout) { 2236 if (u->tcp_timeout) {
2225 pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); 2237 pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
2226 pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] 2238 pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 6f41dd74729d..1f1d90c1716b 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -66,6 +66,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
66 int ttl_check, 66 int ttl_check,
67 struct nf_osf_hdr_ctx *ctx) 67 struct nf_osf_hdr_ctx *ctx)
68{ 68{
69 const __u8 *optpinit = ctx->optp;
69 unsigned int check_WSS = 0; 70 unsigned int check_WSS = 0;
70 int fmatch = FMATCH_WRONG; 71 int fmatch = FMATCH_WRONG;
71 int foptsize, optnum; 72 int foptsize, optnum;
@@ -155,6 +156,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
155 } 156 }
156 } 157 }
157 158
159 if (fmatch != FMATCH_OK)
160 ctx->optp = optpinit;
161
158 return fmatch == FMATCH_OK; 162 return fmatch == FMATCH_OK;
159} 163}
160 164
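
nf_osf_match_one() advances ctx->optp while testing a candidate fingerprint; the fix snapshots the pointer on entry and rewinds it when the candidate does not match, so the next fingerprint starts from clean state. The pattern in isolation:

    #include <stdint.h>

    /* Snapshot a shared cursor before a trial that mutates it, and
     * restore it on failure so later trials see the original state. */
    static int try_match(const uint8_t **optp,
                         int (*matcher)(const uint8_t **))
    {
        const uint8_t *saved = *optp;
        int ok = matcher(optp);       /* may advance *optp */

        if (!ok)
            *optp = saved;            /* rewind for the next candidate */
        return ok;
    }
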
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 7334e0b80a5e..5eb269428832 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -22,11 +22,15 @@
22#include <linux/netfilter_bridge/ebtables.h> 22#include <linux/netfilter_bridge/ebtables.h>
23#include <linux/netfilter_arp/arp_tables.h> 23#include <linux/netfilter_arp/arp_tables.h>
24#include <net/netfilter/nf_tables.h> 24#include <net/netfilter/nf_tables.h>
25#include <net/netns/generic.h>
25 26
26struct nft_xt { 27struct nft_xt {
27 struct list_head head; 28 struct list_head head;
28 struct nft_expr_ops ops; 29 struct nft_expr_ops ops;
29 unsigned int refcnt; 30 refcount_t refcnt;
31
32 /* used only when transaction mutex is locked */
33 unsigned int listcnt;
30 34
31 /* Unlike other expressions, ops doesn't have static storage duration. 35 /* Unlike other expressions, ops doesn't have static storage duration.
32 * nft core assumes they do. We use kfree_rcu so that nft core can 36 * nft core assumes they do. We use kfree_rcu so that nft core can
@@ -43,10 +47,24 @@ struct nft_xt_match_priv {
43 void *info; 47 void *info;
44}; 48};
45 49
50struct nft_compat_net {
51 struct list_head nft_target_list;
52 struct list_head nft_match_list;
53};
54
55static unsigned int nft_compat_net_id __read_mostly;
56static struct nft_expr_type nft_match_type;
57static struct nft_expr_type nft_target_type;
58
59static struct nft_compat_net *nft_compat_pernet(struct net *net)
60{
61 return net_generic(net, nft_compat_net_id);
62}
63
46static bool nft_xt_put(struct nft_xt *xt) 64static bool nft_xt_put(struct nft_xt *xt)
47{ 65{
48 if (--xt->refcnt == 0) { 66 if (refcount_dec_and_test(&xt->refcnt)) {
49 list_del(&xt->head); 67 WARN_ON_ONCE(!list_empty(&xt->head));
50 kfree_rcu(xt, rcu_head); 68 kfree_rcu(xt, rcu_head);
51 return true; 69 return true;
52 } 70 }
@@ -273,7 +291,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
273 return -EINVAL; 291 return -EINVAL;
274 292
275 nft_xt = container_of(expr->ops, struct nft_xt, ops); 293 nft_xt = container_of(expr->ops, struct nft_xt, ops);
276 nft_xt->refcnt++; 294 refcount_inc(&nft_xt->refcnt);
277 return 0; 295 return 0;
278} 296}
279 297
@@ -486,7 +504,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
486 return ret; 504 return ret;
487 505
488 nft_xt = container_of(expr->ops, struct nft_xt, ops); 506 nft_xt = container_of(expr->ops, struct nft_xt, ops);
489 nft_xt->refcnt++; 507 refcount_inc(&nft_xt->refcnt);
490 return 0; 508 return 0;
491} 509}
492 510
@@ -540,6 +558,43 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
540 __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); 558 __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
541} 559}
542 560
561static void nft_compat_activate(const struct nft_ctx *ctx,
562 const struct nft_expr *expr,
563 struct list_head *h)
564{
565 struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
566
567 if (xt->listcnt == 0)
568 list_add(&xt->head, h);
569
570 xt->listcnt++;
571}
572
573static void nft_compat_activate_mt(const struct nft_ctx *ctx,
574 const struct nft_expr *expr)
575{
576 struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
577
578 nft_compat_activate(ctx, expr, &cn->nft_match_list);
579}
580
581static void nft_compat_activate_tg(const struct nft_ctx *ctx,
582 const struct nft_expr *expr)
583{
584 struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
585
586 nft_compat_activate(ctx, expr, &cn->nft_target_list);
587}
588
589static void nft_compat_deactivate(const struct nft_ctx *ctx,
590 const struct nft_expr *expr)
591{
592 struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
593
594 if (--xt->listcnt == 0)
595 list_del_init(&xt->head);
596}
597
543static void 598static void
544nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 599nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
545{ 600{
@@ -734,10 +789,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
734 .cb = nfnl_nft_compat_cb, 789 .cb = nfnl_nft_compat_cb,
735}; 790};
736 791
737static LIST_HEAD(nft_match_list);
738
739static struct nft_expr_type nft_match_type;
740
741static bool nft_match_cmp(const struct xt_match *match, 792static bool nft_match_cmp(const struct xt_match *match,
742 const char *name, u32 rev, u32 family) 793 const char *name, u32 rev, u32 family)
743{ 794{
@@ -749,6 +800,7 @@ static const struct nft_expr_ops *
749nft_match_select_ops(const struct nft_ctx *ctx, 800nft_match_select_ops(const struct nft_ctx *ctx,
750 const struct nlattr * const tb[]) 801 const struct nlattr * const tb[])
751{ 802{
803 struct nft_compat_net *cn;
752 struct nft_xt *nft_match; 804 struct nft_xt *nft_match;
753 struct xt_match *match; 805 struct xt_match *match;
754 unsigned int matchsize; 806 unsigned int matchsize;
@@ -765,8 +817,10 @@ nft_match_select_ops(const struct nft_ctx *ctx,
765 rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV])); 817 rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
766 family = ctx->family; 818 family = ctx->family;
767 819
820 cn = nft_compat_pernet(ctx->net);
821
768 /* Re-use the existing match if it's already loaded. */ 822 /* Re-use the existing match if it's already loaded. */
769 list_for_each_entry(nft_match, &nft_match_list, head) { 823 list_for_each_entry(nft_match, &cn->nft_match_list, head) {
770 struct xt_match *match = nft_match->ops.data; 824 struct xt_match *match = nft_match->ops.data;
771 825
772 if (nft_match_cmp(match, mt_name, rev, family)) 826 if (nft_match_cmp(match, mt_name, rev, family))
@@ -789,11 +843,13 @@ nft_match_select_ops(const struct nft_ctx *ctx,
789 goto err; 843 goto err;
790 } 844 }
791 845
792 nft_match->refcnt = 0; 846 refcount_set(&nft_match->refcnt, 0);
793 nft_match->ops.type = &nft_match_type; 847 nft_match->ops.type = &nft_match_type;
794 nft_match->ops.eval = nft_match_eval; 848 nft_match->ops.eval = nft_match_eval;
795 nft_match->ops.init = nft_match_init; 849 nft_match->ops.init = nft_match_init;
796 nft_match->ops.destroy = nft_match_destroy; 850 nft_match->ops.destroy = nft_match_destroy;
851 nft_match->ops.activate = nft_compat_activate_mt;
852 nft_match->ops.deactivate = nft_compat_deactivate;
797 nft_match->ops.dump = nft_match_dump; 853 nft_match->ops.dump = nft_match_dump;
798 nft_match->ops.validate = nft_match_validate; 854 nft_match->ops.validate = nft_match_validate;
799 nft_match->ops.data = match; 855 nft_match->ops.data = match;
@@ -810,7 +866,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
810 866
811 nft_match->ops.size = matchsize; 867 nft_match->ops.size = matchsize;
812 868
813 list_add(&nft_match->head, &nft_match_list); 869 nft_match->listcnt = 1;
870 list_add(&nft_match->head, &cn->nft_match_list);
814 871
815 return &nft_match->ops; 872 return &nft_match->ops;
816err: 873err:
@@ -826,10 +883,6 @@ static struct nft_expr_type nft_match_type __read_mostly = {
826 .owner = THIS_MODULE, 883 .owner = THIS_MODULE,
827}; 884};
828 885
829static LIST_HEAD(nft_target_list);
830
831static struct nft_expr_type nft_target_type;
832
833static bool nft_target_cmp(const struct xt_target *tg, 886static bool nft_target_cmp(const struct xt_target *tg,
834 const char *name, u32 rev, u32 family) 887 const char *name, u32 rev, u32 family)
835{ 888{
@@ -841,6 +894,7 @@ static const struct nft_expr_ops *
841nft_target_select_ops(const struct nft_ctx *ctx, 894nft_target_select_ops(const struct nft_ctx *ctx,
842 const struct nlattr * const tb[]) 895 const struct nlattr * const tb[])
843{ 896{
897 struct nft_compat_net *cn;
844 struct nft_xt *nft_target; 898 struct nft_xt *nft_target;
845 struct xt_target *target; 899 struct xt_target *target;
846 char *tg_name; 900 char *tg_name;
@@ -861,8 +915,9 @@ nft_target_select_ops(const struct nft_ctx *ctx,
861 strcmp(tg_name, "standard") == 0) 915 strcmp(tg_name, "standard") == 0)
862 return ERR_PTR(-EINVAL); 916 return ERR_PTR(-EINVAL);
863 917
918 cn = nft_compat_pernet(ctx->net);
864 /* Re-use the existing target if it's already loaded. */ 919 /* Re-use the existing target if it's already loaded. */
865 list_for_each_entry(nft_target, &nft_target_list, head) { 920 list_for_each_entry(nft_target, &cn->nft_target_list, head) {
866 struct xt_target *target = nft_target->ops.data; 921 struct xt_target *target = nft_target->ops.data;
867 922
868 if (!target->target) 923 if (!target->target)
@@ -893,11 +948,13 @@ nft_target_select_ops(const struct nft_ctx *ctx,
893 goto err; 948 goto err;
894 } 949 }
895 950
896 nft_target->refcnt = 0; 951 refcount_set(&nft_target->refcnt, 0);
897 nft_target->ops.type = &nft_target_type; 952 nft_target->ops.type = &nft_target_type;
898 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); 953 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
899 nft_target->ops.init = nft_target_init; 954 nft_target->ops.init = nft_target_init;
900 nft_target->ops.destroy = nft_target_destroy; 955 nft_target->ops.destroy = nft_target_destroy;
956 nft_target->ops.activate = nft_compat_activate_tg;
957 nft_target->ops.deactivate = nft_compat_deactivate;
901 nft_target->ops.dump = nft_target_dump; 958 nft_target->ops.dump = nft_target_dump;
902 nft_target->ops.validate = nft_target_validate; 959 nft_target->ops.validate = nft_target_validate;
903 nft_target->ops.data = target; 960 nft_target->ops.data = target;
@@ -907,7 +964,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
907 else 964 else
908 nft_target->ops.eval = nft_target_eval_xt; 965 nft_target->ops.eval = nft_target_eval_xt;
909 966
910 list_add(&nft_target->head, &nft_target_list); 967 nft_target->listcnt = 1;
968 list_add(&nft_target->head, &cn->nft_target_list);
911 969
912 return &nft_target->ops; 970 return &nft_target->ops;
913err: 971err:
@@ -923,13 +981,74 @@ static struct nft_expr_type nft_target_type __read_mostly = {
923 .owner = THIS_MODULE, 981 .owner = THIS_MODULE,
924}; 982};
925 983
984static int __net_init nft_compat_init_net(struct net *net)
985{
986 struct nft_compat_net *cn = nft_compat_pernet(net);
987
988 INIT_LIST_HEAD(&cn->nft_target_list);
989 INIT_LIST_HEAD(&cn->nft_match_list);
990
991 return 0;
992}
993
994static void __net_exit nft_compat_exit_net(struct net *net)
995{
996 struct nft_compat_net *cn = nft_compat_pernet(net);
997 struct nft_xt *xt, *next;
998
999 if (list_empty(&cn->nft_match_list) &&
1000 list_empty(&cn->nft_target_list))
1001 return;
1002
1003 /* If there was an error that left an nft_xt expr not fully
1004 * initialized and no one else requested the same expression later,
1005 * the lists contain 0-refcount entries that still hold a module reference.
1006 *
1007 * Clean them here.
1008 */
1009 mutex_lock(&net->nft.commit_mutex);
1010 list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) {
1011 struct xt_target *target = xt->ops.data;
1012
1013 list_del_init(&xt->head);
1014
1015 if (refcount_read(&xt->refcnt))
1016 continue;
1017 module_put(target->me);
1018 kfree(xt);
1019 }
1020
1021 list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) {
1022 struct xt_match *match = xt->ops.data;
1023
1024 list_del_init(&xt->head);
1025
1026 if (refcount_read(&xt->refcnt))
1027 continue;
1028 module_put(match->me);
1029 kfree(xt);
1030 }
1031 mutex_unlock(&net->nft.commit_mutex);
1032}
1033
1034static struct pernet_operations nft_compat_net_ops = {
1035 .init = nft_compat_init_net,
1036 .exit = nft_compat_exit_net,
1037 .id = &nft_compat_net_id,
1038 .size = sizeof(struct nft_compat_net),
1039};
1040
926static int __init nft_compat_module_init(void) 1041static int __init nft_compat_module_init(void)
927{ 1042{
928 int ret; 1043 int ret;
929 1044
1045 ret = register_pernet_subsys(&nft_compat_net_ops);
1046 if (ret < 0)
1047 goto err_target;
1048
930 ret = nft_register_expr(&nft_match_type); 1049 ret = nft_register_expr(&nft_match_type);
931 if (ret < 0) 1050 if (ret < 0)
932 return ret; 1051 goto err_pernet;
933 1052
934 ret = nft_register_expr(&nft_target_type); 1053 ret = nft_register_expr(&nft_target_type);
935 if (ret < 0) 1054 if (ret < 0)
@@ -942,45 +1061,21 @@ static int __init nft_compat_module_init(void)
942 } 1061 }
943 1062
944 return ret; 1063 return ret;
945
946err_target: 1064err_target:
947 nft_unregister_expr(&nft_target_type); 1065 nft_unregister_expr(&nft_target_type);
948err_match: 1066err_match:
949 nft_unregister_expr(&nft_match_type); 1067 nft_unregister_expr(&nft_match_type);
1068err_pernet:
1069 unregister_pernet_subsys(&nft_compat_net_ops);
950 return ret; 1070 return ret;
951} 1071}
952 1072
953static void __exit nft_compat_module_exit(void) 1073static void __exit nft_compat_module_exit(void)
954{ 1074{
955 struct nft_xt *xt, *next;
956
 957 /* list should be empty here; it can be non-empty only in case there
 958 * was an error that left an nft_xt expr not fully initialized
 959 * and no one else requested the same expression later.
960 *
961 * In this case, the lists contain 0-refcount entries that still
962 * hold module reference.
963 */
964 list_for_each_entry_safe(xt, next, &nft_target_list, head) {
965 struct xt_target *target = xt->ops.data;
966
967 if (WARN_ON_ONCE(xt->refcnt))
968 continue;
969 module_put(target->me);
970 kfree(xt);
971 }
972
973 list_for_each_entry_safe(xt, next, &nft_match_list, head) {
974 struct xt_match *match = xt->ops.data;
975
976 if (WARN_ON_ONCE(xt->refcnt))
977 continue;
978 module_put(match->me);
979 kfree(xt);
980 }
981 nfnetlink_subsys_unregister(&nfnl_compat_subsys); 1075 nfnetlink_subsys_unregister(&nfnl_compat_subsys);
982 nft_unregister_expr(&nft_target_type); 1076 nft_unregister_expr(&nft_target_type);
983 nft_unregister_expr(&nft_match_type); 1077 nft_unregister_expr(&nft_match_type);
1078 unregister_pernet_subsys(&nft_compat_net_ops);
984} 1079}
985 1080
986MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT); 1081MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
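
This patch does two related things: the global match/target lists become per-netns (struct nft_compat_net behind net_generic()), and the hand-rolled `unsigned int refcnt` becomes a refcount_t, with a separate listcnt tracking list membership under the commit mutex. A userspace sketch of the refcount half, using C11 atomics as a stand-in for refcount_t:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Stand-in for the kernel's refcount_t: the holder that drops the
     * last reference (the 1 -> 0 transition) must free the object. */
    typedef struct { atomic_uint refs; } refcount_t;

    static void refcount_set(refcount_t *r, unsigned int n)
    {
        atomic_store(&r->refs, n);
    }

    static void refcount_inc(refcount_t *r)
    {
        atomic_fetch_add(&r->refs, 1);
    }

    static bool refcount_dec_and_test(refcount_t *r)
    {
        return atomic_fetch_sub(&r->refs, 1) == 1;   /* true: hit zero */
    }
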
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index cbd51ed5a2d7..908e53ab47a4 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -52,21 +52,21 @@ void nr_start_t1timer(struct sock *sk)
52{ 52{
53 struct nr_sock *nr = nr_sk(sk); 53 struct nr_sock *nr = nr_sk(sk);
54 54
55 mod_timer(&nr->t1timer, jiffies + nr->t1); 55 sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
56} 56}
57 57
58void nr_start_t2timer(struct sock *sk) 58void nr_start_t2timer(struct sock *sk)
59{ 59{
60 struct nr_sock *nr = nr_sk(sk); 60 struct nr_sock *nr = nr_sk(sk);
61 61
62 mod_timer(&nr->t2timer, jiffies + nr->t2); 62 sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
63} 63}
64 64
65void nr_start_t4timer(struct sock *sk) 65void nr_start_t4timer(struct sock *sk)
66{ 66{
67 struct nr_sock *nr = nr_sk(sk); 67 struct nr_sock *nr = nr_sk(sk);
68 68
69 mod_timer(&nr->t4timer, jiffies + nr->t4); 69 sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
70} 70}
71 71
72void nr_start_idletimer(struct sock *sk) 72void nr_start_idletimer(struct sock *sk)
@@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk)
74 struct nr_sock *nr = nr_sk(sk); 74 struct nr_sock *nr = nr_sk(sk);
75 75
76 if (nr->idle > 0) 76 if (nr->idle > 0)
77 mod_timer(&nr->idletimer, jiffies + nr->idle); 77 sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
78} 78}
79 79
80void nr_start_heartbeat(struct sock *sk) 80void nr_start_heartbeat(struct sock *sk)
81{ 81{
82 mod_timer(&sk->sk_timer, jiffies + 5 * HZ); 82 sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
83} 83}
84 84
85void nr_stop_t1timer(struct sock *sk) 85void nr_stop_t1timer(struct sock *sk)
86{ 86{
87 del_timer(&nr_sk(sk)->t1timer); 87 sk_stop_timer(sk, &nr_sk(sk)->t1timer);
88} 88}
89 89
90void nr_stop_t2timer(struct sock *sk) 90void nr_stop_t2timer(struct sock *sk)
91{ 91{
92 del_timer(&nr_sk(sk)->t2timer); 92 sk_stop_timer(sk, &nr_sk(sk)->t2timer);
93} 93}
94 94
95void nr_stop_t4timer(struct sock *sk) 95void nr_stop_t4timer(struct sock *sk)
96{ 96{
97 del_timer(&nr_sk(sk)->t4timer); 97 sk_stop_timer(sk, &nr_sk(sk)->t4timer);
98} 98}
99 99
100void nr_stop_idletimer(struct sock *sk) 100void nr_stop_idletimer(struct sock *sk)
101{ 101{
102 del_timer(&nr_sk(sk)->idletimer); 102 sk_stop_timer(sk, &nr_sk(sk)->idletimer);
103} 103}
104 104
105void nr_stop_heartbeat(struct sock *sk) 105void nr_stop_heartbeat(struct sock *sk)
106{ 106{
107 del_timer(&sk->sk_timer); 107 sk_stop_timer(sk, &sk->sk_timer);
108} 108}
109 109
110int nr_t1timer_running(struct sock *sk) 110int nr_t1timer_running(struct sock *sk)
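
Every mod_timer()/del_timer() pair becomes sk_reset_timer()/sk_stop_timer(); the sk_* variants take a reference on the socket while the timer is pending, so a firing timer can never run against an already-freed sock. A compilable sketch of that ownership rule, with hypothetical types:

    #include <stdbool.h>

    struct timer { bool pending; };
    struct sock  { int refcnt; };

    /* Returns true if the timer was already pending (mirrors mod_timer()). */
    static bool mod_timer(struct timer *t)
    { bool p = t->pending; t->pending = true;  return p; }

    /* Returns true if a pending timer was deactivated (mirrors del_timer()). */
    static bool del_timer(struct timer *t)
    { bool p = t->pending; t->pending = false; return p; }

    static void sk_reset_timer(struct sock *sk, struct timer *t)
    {
        if (!mod_timer(t))
            sk->refcnt++;   /* newly armed: the timer now pins the sock */
    }

    static void sk_stop_timer(struct sock *sk, struct timer *t)
    {
        if (del_timer(t))
            sk->refcnt--;   /* timer disarmed: drop its reference */
    }
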
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 77e9f85a2c92..f2ff21d7df08 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev)
850 850
851/* 851/*
852 * Route a frame to an appropriate AX.25 connection. 852 * Route a frame to an appropriate AX.25 connection.
853 * A NULL ax25_cb indicates an internally generated frame.
853 */ 854 */
854int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) 855int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
855{ 856{
@@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
867 868
868 if (skb->len < ROSE_MIN_LEN) 869 if (skb->len < ROSE_MIN_LEN)
869 return res; 870 return res;
871
872 if (!ax25)
873 return rose_loopback_queue(skb, NULL);
874
870 frametype = skb->data[2]; 875 frametype = skb->data[2];
871 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 876 lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
872 if (frametype == ROSE_CALL_REQUEST && 877 if (frametype == ROSE_CALL_REQUEST &&
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index ed8e006dae85..6200cd2b4b99 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -280,7 +280,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
280 280
281 if (saddr) { 281 if (saddr) {
282 fl6->saddr = saddr->v6.sin6_addr; 282 fl6->saddr = saddr->v6.sin6_addr;
283 fl6->fl6_sport = saddr->v6.sin6_port; 283 if (!fl6->fl6_sport)
284 fl6->fl6_sport = saddr->v6.sin6_port;
284 285
285 pr_debug("src=%pI6 - ", &fl6->saddr); 286 pr_debug("src=%pI6 - ", &fl6->saddr);
286 } 287 }
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 4e0eeb113ef5..6abc8b274270 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -440,7 +440,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
440 } 440 }
441 if (saddr) { 441 if (saddr) {
442 fl4->saddr = saddr->v4.sin_addr.s_addr; 442 fl4->saddr = saddr->v4.sin_addr.s_addr;
443 fl4->fl4_sport = saddr->v4.sin_port; 443 if (!fl4->fl4_sport)
444 fl4->fl4_sport = saddr->v4.sin_port;
444 } 445 }
445 446
446 pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr, 447 pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr,
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f4ac6c592e13..d05c57664e36 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -495,7 +495,10 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
495 * 495 *
496 * [INIT ACK back to where the INIT came from.] 496 * [INIT ACK back to where the INIT came from.]
497 */ 497 */
498 retval->transport = chunk->transport; 498 if (chunk->transport)
499 retval->transport =
500 sctp_assoc_lookup_paddr(asoc,
501 &chunk->transport->ipaddr);
499 502
500 retval->subh.init_hdr = 503 retval->subh.init_hdr =
501 sctp_addto_chunk(retval, sizeof(initack), &initack); 504 sctp_addto_chunk(retval, sizeof(initack), &initack);
@@ -642,8 +645,10 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
642 * 645 *
643 * [COOKIE ACK back to where the COOKIE ECHO came from.] 646 * [COOKIE ACK back to where the COOKIE ECHO came from.]
644 */ 647 */
645 if (retval && chunk) 648 if (retval && chunk && chunk->transport)
646 retval->transport = chunk->transport; 649 retval->transport =
650 sctp_assoc_lookup_paddr(asoc,
651 &chunk->transport->ipaddr);
647 652
648 return retval; 653 return retval;
649} 654}
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 3892e7630f3a..80e0ae5534ec 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -585,9 +585,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
585 struct sctp_strreset_outreq *outreq = param.v; 585 struct sctp_strreset_outreq *outreq = param.v;
586 struct sctp_stream *stream = &asoc->stream; 586 struct sctp_stream *stream = &asoc->stream;
587 __u32 result = SCTP_STRRESET_DENIED; 587 __u32 result = SCTP_STRRESET_DENIED;
588 __u16 i, nums, flags = 0;
589 __be16 *str_p = NULL; 588 __be16 *str_p = NULL;
590 __u32 request_seq; 589 __u32 request_seq;
590 __u16 i, nums;
591 591
592 request_seq = ntohl(outreq->request_seq); 592 request_seq = ntohl(outreq->request_seq);
593 593
@@ -615,6 +615,15 @@ struct sctp_chunk *sctp_process_strreset_outreq(
615 if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) 615 if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
616 goto out; 616 goto out;
617 617
618 nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
619 str_p = outreq->list_of_streams;
620 for (i = 0; i < nums; i++) {
621 if (ntohs(str_p[i]) >= stream->incnt) {
622 result = SCTP_STRRESET_ERR_WRONG_SSN;
623 goto out;
624 }
625 }
626
618 if (asoc->strreset_chunk) { 627 if (asoc->strreset_chunk) {
619 if (!sctp_chunk_lookup_strreset_param( 628 if (!sctp_chunk_lookup_strreset_param(
620 asoc, outreq->response_seq, 629 asoc, outreq->response_seq,
@@ -637,32 +646,19 @@ struct sctp_chunk *sctp_process_strreset_outreq(
637 sctp_chunk_put(asoc->strreset_chunk); 646 sctp_chunk_put(asoc->strreset_chunk);
638 asoc->strreset_chunk = NULL; 647 asoc->strreset_chunk = NULL;
639 } 648 }
640
641 flags = SCTP_STREAM_RESET_INCOMING_SSN;
642 } 649 }
643 650
644 nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); 651 if (nums)
645 if (nums) {
646 str_p = outreq->list_of_streams;
647 for (i = 0; i < nums; i++) {
648 if (ntohs(str_p[i]) >= stream->incnt) {
649 result = SCTP_STRRESET_ERR_WRONG_SSN;
650 goto out;
651 }
652 }
653
654 for (i = 0; i < nums; i++) 652 for (i = 0; i < nums; i++)
655 SCTP_SI(stream, ntohs(str_p[i]))->mid = 0; 653 SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
656 } else { 654 else
657 for (i = 0; i < stream->incnt; i++) 655 for (i = 0; i < stream->incnt; i++)
658 SCTP_SI(stream, i)->mid = 0; 656 SCTP_SI(stream, i)->mid = 0;
659 }
660 657
661 result = SCTP_STRRESET_PERFORMED; 658 result = SCTP_STRRESET_PERFORMED;
662 659
663 *evp = sctp_ulpevent_make_stream_reset_event(asoc, 660 *evp = sctp_ulpevent_make_stream_reset_event(asoc,
664 flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p, 661 SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
665 GFP_ATOMIC);
666 662
667out: 663out:
668 sctp_update_strreset_result(asoc, result); 664 sctp_update_strreset_result(asoc, result);
@@ -738,9 +734,6 @@ struct sctp_chunk *sctp_process_strreset_inreq(
738 734
739 result = SCTP_STRRESET_PERFORMED; 735 result = SCTP_STRRESET_PERFORMED;
740 736
741 *evp = sctp_ulpevent_make_stream_reset_event(asoc,
742 SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
743
744out: 737out:
745 sctp_update_strreset_result(asoc, result); 738 sctp_update_strreset_result(asoc, result);
746err: 739err:
@@ -873,6 +866,14 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
873 if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) 866 if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
874 goto out; 867 goto out;
875 868
869 in = ntohs(addstrm->number_of_streams);
870 incnt = stream->incnt + in;
871 if (!in || incnt > SCTP_MAX_STREAM)
872 goto out;
873
874 if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
875 goto out;
876
876 if (asoc->strreset_chunk) { 877 if (asoc->strreset_chunk) {
877 if (!sctp_chunk_lookup_strreset_param( 878 if (!sctp_chunk_lookup_strreset_param(
878 asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) { 879 asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
@@ -896,14 +897,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
896 } 897 }
897 } 898 }
898 899
899 in = ntohs(addstrm->number_of_streams);
900 incnt = stream->incnt + in;
901 if (!in || incnt > SCTP_MAX_STREAM)
902 goto out;
903
904 if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
905 goto out;
906
907 stream->incnt = incnt; 900 stream->incnt = incnt;
908 901
909 result = SCTP_STRRESET_PERFORMED; 902 result = SCTP_STRRESET_PERFORMED;
@@ -973,9 +966,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
973 966
974 result = SCTP_STRRESET_PERFORMED; 967 result = SCTP_STRRESET_PERFORMED;
975 968
976 *evp = sctp_ulpevent_make_stream_change_event(asoc,
977 0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
978
979out: 969out:
980 sctp_update_strreset_result(asoc, result); 970 sctp_update_strreset_result(asoc, result);
981err: 971err:
@@ -1036,10 +1026,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
1036 sout->mid_uo = 0; 1026 sout->mid_uo = 0;
1037 } 1027 }
1038 } 1028 }
1039
1040 flags = SCTP_STREAM_RESET_OUTGOING_SSN;
1041 } 1029 }
1042 1030
1031 flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
1032
1043 for (i = 0; i < stream->outcnt; i++) 1033 for (i = 0; i < stream->outcnt; i++)
1044 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; 1034 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
1045 1035
@@ -1058,6 +1048,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
1058 nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 1048 nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
1059 sizeof(__u16); 1049 sizeof(__u16);
1060 1050
1051 flags |= SCTP_STREAM_RESET_INCOMING_SSN;
1052
1061 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, 1053 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
1062 nums, str_p, GFP_ATOMIC); 1054 nums, str_p, GFP_ATOMIC);
1063 } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) { 1055 } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
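
The stream.c changes share one theme: validation and resource allocation are hoisted ahead of any destructive step (and the reset-event flags are recomputed to match), so a bad stream id or a failed allocation can no longer leave the association half-modified. The ordering in miniature, with hypothetical names:

    #include <stdint.h>

    /* Validate every requested stream id first; only then mutate. */
    static int reset_in_streams(uint16_t *mids, int incnt,
                                const uint16_t *ids, int nums)
    {
        int i;

        for (i = 0; i < nums; i++)
            if (ids[i] >= incnt)
                return -1;          /* reject before touching anything */

        for (i = 0; i < nums; i++)
            mids[ids[i]] = 0;       /* destructive step, now safe */
        return 0;
    }
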
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index cf51b8f9b15f..1f200119268c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
537 DMA_TO_DEVICE); 537 DMA_TO_DEVICE);
538} 538}
539 539
540/* If the xdr_buf has more elements than the device can
541 * transmit in a single RDMA Send, then the reply will
542 * have to be copied into a bounce buffer.
543 */
544static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
545 struct xdr_buf *xdr,
546 __be32 *wr_lst)
547{
548 int elements;
549
550 /* xdr->head */
551 elements = 1;
552
553 /* xdr->pages */
554 if (!wr_lst) {
555 unsigned int remaining;
556 unsigned long pageoff;
557
558 pageoff = xdr->page_base & ~PAGE_MASK;
559 remaining = xdr->page_len;
560 while (remaining) {
561 ++elements;
562 remaining -= min_t(u32, PAGE_SIZE - pageoff,
563 remaining);
564 pageoff = 0;
565 }
566 }
567
568 /* xdr->tail */
569 if (xdr->tail[0].iov_len)
570 ++elements;
571
572 /* assume 1 SGE is needed for the transport header */
573 return elements >= rdma->sc_max_send_sges;
574}
575
576/* The device is not capable of sending the reply directly.
577 * Assemble the elements of @xdr into the transport header
578 * buffer.
579 */
580static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
581 struct svc_rdma_send_ctxt *ctxt,
582 struct xdr_buf *xdr, __be32 *wr_lst)
583{
584 unsigned char *dst, *tailbase;
585 unsigned int taillen;
586
587 dst = ctxt->sc_xprt_buf;
588 dst += ctxt->sc_sges[0].length;
589
590 memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
591 dst += xdr->head[0].iov_len;
592
593 tailbase = xdr->tail[0].iov_base;
594 taillen = xdr->tail[0].iov_len;
595 if (wr_lst) {
596 u32 xdrpad;
597
598 xdrpad = xdr_padsize(xdr->page_len);
599 if (taillen && xdrpad) {
600 tailbase += xdrpad;
601 taillen -= xdrpad;
602 }
603 } else {
604 unsigned int len, remaining;
605 unsigned long pageoff;
606 struct page **ppages;
607
608 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
609 pageoff = xdr->page_base & ~PAGE_MASK;
610 remaining = xdr->page_len;
611 while (remaining) {
612 len = min_t(u32, PAGE_SIZE - pageoff, remaining);
613
614 memcpy(dst, page_address(*ppages), len);
615 remaining -= len;
616 dst += len;
617 pageoff = 0;
618 }
619 }
620
621 if (taillen)
622 memcpy(dst, tailbase, taillen);
623
624 ctxt->sc_sges[0].length += xdr->len;
625 ib_dma_sync_single_for_device(rdma->sc_pd->device,
626 ctxt->sc_sges[0].addr,
627 ctxt->sc_sges[0].length,
628 DMA_TO_DEVICE);
629
630 return 0;
631}
632
540/* svc_rdma_map_reply_msg - Map the buffer holding RPC message 633/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
541 * @rdma: controlling transport 634 * @rdma: controlling transport
542 * @ctxt: send_ctxt for the Send WR 635 * @ctxt: send_ctxt for the Send WR
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
559 u32 xdr_pad; 652 u32 xdr_pad;
560 int ret; 653 int ret;
561 654
562 if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 655 if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
563 return -EIO; 656 return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
657
658 ++ctxt->sc_cur_sge_no;
564 ret = svc_rdma_dma_map_buf(rdma, ctxt, 659 ret = svc_rdma_dma_map_buf(rdma, ctxt,
565 xdr->head[0].iov_base, 660 xdr->head[0].iov_base,
566 xdr->head[0].iov_len); 661 xdr->head[0].iov_len);
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
591 while (remaining) { 686 while (remaining) {
592 len = min_t(u32, PAGE_SIZE - page_off, remaining); 687 len = min_t(u32, PAGE_SIZE - page_off, remaining);
593 688
594 if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 689 ++ctxt->sc_cur_sge_no;
595 return -EIO;
596 ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, 690 ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
597 page_off, len); 691 page_off, len);
598 if (ret < 0) 692 if (ret < 0)
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
606 len = xdr->tail[0].iov_len; 700 len = xdr->tail[0].iov_len;
607tail: 701tail:
608 if (len) { 702 if (len) {
609 if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) 703 ++ctxt->sc_cur_sge_no;
610 return -EIO;
611 ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); 704 ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
612 if (ret < 0) 705 if (ret < 0)
613 return ret; 706 return ret;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 924c17d46903..57f86c63a463 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
419 /* Transport header, head iovec, tail iovec */ 419 /* Transport header, head iovec, tail iovec */
420 newxprt->sc_max_send_sges = 3; 420 newxprt->sc_max_send_sges = 3;
421 /* Add one SGE per page list entry */ 421 /* Add one SGE per page list entry */
422 newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE; 422 newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
423 if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) { 423 if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
424 pr_err("svcrdma: too few Send SGEs available (%d needed)\n", 424 newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
425 newxprt->sc_max_send_sges);
426 goto errout;
427 }
428 newxprt->sc_max_req_size = svcrdma_max_req_size; 425 newxprt->sc_max_req_size = svcrdma_max_req_size;
429 newxprt->sc_max_requests = svcrdma_max_requests; 426 newxprt->sc_max_requests = svcrdma_max_requests;
430 newxprt->sc_max_bc_requests = svcrdma_max_bc_requests; 427 newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 11cdc8f7db63..bf5b54b513bc 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -439,6 +439,8 @@ static int tls_do_encryption(struct sock *sk,
439 struct scatterlist *sge = sk_msg_elem(msg_en, start); 439 struct scatterlist *sge = sk_msg_elem(msg_en, start);
440 int rc; 440 int rc;
441 441
442 memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
443
442 sge->offset += tls_ctx->tx.prepend_size; 444 sge->offset += tls_ctx->tx.prepend_size;
443 sge->length -= tls_ctx->tx.prepend_size; 445 sge->length -= tls_ctx->tx.prepend_size;
444 446
@@ -448,7 +450,7 @@ static int tls_do_encryption(struct sock *sk,
448 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); 450 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
449 aead_request_set_crypt(aead_req, rec->sg_aead_in, 451 aead_request_set_crypt(aead_req, rec->sg_aead_in,
450 rec->sg_aead_out, 452 rec->sg_aead_out,
451 data_len, tls_ctx->tx.iv); 453 data_len, rec->iv_data);
452 454
453 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, 455 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
454 tls_encrypt_done, sk); 456 tls_encrypt_done, sk);
@@ -1792,7 +1794,9 @@ void tls_sw_free_resources_tx(struct sock *sk)
1792 if (atomic_read(&ctx->encrypt_pending)) 1794 if (atomic_read(&ctx->encrypt_pending))
1793 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1795 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1794 1796
1797 release_sock(sk);
1795 cancel_delayed_work_sync(&ctx->tx_work.work); 1798 cancel_delayed_work_sync(&ctx->tx_work.work);
1799 lock_sock(sk);
1796 1800
1797 /* Tx whatever records we can transmit and abandon the rest */ 1801 /* Tx whatever records we can transmit and abandon the rest */
1798 tls_tx_records(sk, -1); 1802 tls_tx_records(sk, -1);
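
The release_sock()/lock_sock() pair around cancel_delayed_work_sync() breaks a deadlock: the tx work handler itself takes the socket lock, so waiting synchronously for it while holding that lock could block forever. The same shape in userspace, with a mutex and a thread join standing in:

    #include <pthread.h>

    static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller holds sock_lock; worker() also takes sock_lock internally,
     * so we must drop it for the duration of the synchronous wait. */
    static void wait_for_worker_locked(pthread_t worker)
    {
        pthread_mutex_unlock(&sock_lock);   /* release_sock(sk) */
        pthread_join(worker, NULL);         /* cancel_delayed_work_sync() */
        pthread_mutex_lock(&sock_lock);     /* lock_sock(sk) */
    }
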
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5e49492d5911..74150ad95823 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -555,7 +555,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
555 }, 555 },
556 [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1), 556 [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1),
557 [NL80211_ATTR_PEER_MEASUREMENTS] = 557 [NL80211_ATTR_PEER_MEASUREMENTS] =
558 NLA_POLICY_NESTED(NL80211_PMSR_FTM_REQ_ATTR_MAX, 558 NLA_POLICY_NESTED(NL80211_PMSR_ATTR_MAX,
559 nl80211_pmsr_attr_policy), 559 nl80211_pmsr_attr_policy),
560}; 560};
561 561
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ecfb1a06dbb2..dd58b9909ac9 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1024,8 +1024,13 @@ static void regdb_fw_cb(const struct firmware *fw, void *context)
1024 } 1024 }
1025 1025
1026 rtnl_lock(); 1026 rtnl_lock();
1027 if (WARN_ON(regdb && !IS_ERR(regdb))) { 1027 if (regdb && !IS_ERR(regdb)) {
1028 /* just restore and free new db */ 1028 /* negative case - a bug
1029 * positive case - can happen due to a race when multiple callbacks
1030 * are queued, since the firmware request completes asynchronously
1031 *
1032 * In either case, just restore and free the new db.
1033 */
1029 } else if (set_error) { 1034 } else if (set_error) {
1030 regdb = ERR_PTR(set_error); 1035 regdb = ERR_PTR(set_error);
1031 } else if (fw) { 1036 } else if (fw) {
@@ -1255,7 +1260,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
1255 * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), 1260 * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
1256 * however it is safe for now to assume that a frequency rule should not be 1261 * however it is safe for now to assume that a frequency rule should not be
1257 * part of a frequency's band if the start freq or end freq are off by more 1262 * part of a frequency's band if the start freq or end freq are off by more
1258 * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the 1263 * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
1259 * 60 GHz band. 1264 * 60 GHz band.
1260 * This resolution can be lowered and should be considered as we add 1265 * This resolution can be lowered and should be considered as we add
1261 * regulatory rule support for other "bands". 1266 * regulatory rule support for other "bands".
@@ -1270,7 +1275,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
1270 * with the Channel starting frequency above 45 GHz. 1275 * with the Channel starting frequency above 45 GHz.
1271 */ 1276 */
1272 u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ? 1277 u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
1273 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ; 1278 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
1274 if (abs(freq_khz - freq_range->start_freq_khz) <= limit) 1279 if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
1275 return true; 1280 return true;
1276 if (abs(freq_khz - freq_range->end_freq_khz) <= limit) 1281 if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 934492bad8e0..ba0a4048c846 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -680,16 +680,6 @@ static void xfrm_hash_resize(struct work_struct *work)
680 mutex_unlock(&hash_resize_mutex); 680 mutex_unlock(&hash_resize_mutex);
681} 681}
682 682
683static void xfrm_hash_reset_inexact_table(struct net *net)
684{
685 struct xfrm_pol_inexact_bin *b;
686
687 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
688
689 list_for_each_entry(b, &net->xfrm.inexact_bins, inexact_bins)
690 INIT_HLIST_HEAD(&b->hhead);
691}
692
693/* Make sure *pol can be inserted into fastbin. 683/* Make sure *pol can be inserted into fastbin.
694 * Useful to check that later insert requests will be successful 684 * Useful to check that later insert requests will be successful
695 * (provided xfrm_policy_lock is held throughout). 685 * (provided xfrm_policy_lock is held throughout).
696 */ 686 */
@@ -833,13 +823,13 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
833 u16 family) 823 u16 family)
834{ 824{
835 unsigned int matched_s, matched_d; 825 unsigned int matched_s, matched_d;
836 struct hlist_node *newpos = NULL;
837 struct xfrm_policy *policy, *p; 826 struct xfrm_policy *policy, *p;
838 827
839 matched_s = 0; 828 matched_s = 0;
840 matched_d = 0; 829 matched_d = 0;
841 830
842 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { 831 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
832 struct hlist_node *newpos = NULL;
843 bool matches_s, matches_d; 833 bool matches_s, matches_d;
844 834
845 if (!policy->bydst_reinsert) 835 if (!policy->bydst_reinsert)
@@ -849,16 +839,19 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
849 839
850 policy->bydst_reinsert = false; 840 policy->bydst_reinsert = false;
851 hlist_for_each_entry(p, &n->hhead, bydst) { 841 hlist_for_each_entry(p, &n->hhead, bydst) {
852 if (policy->priority >= p->priority) 842 if (policy->priority > p->priority)
843 newpos = &p->bydst;
844 else if (policy->priority == p->priority &&
845 policy->pos > p->pos)
853 newpos = &p->bydst; 846 newpos = &p->bydst;
854 else 847 else
855 break; 848 break;
856 } 849 }
857 850
858 if (newpos) 851 if (newpos)
859 hlist_add_behind(&policy->bydst, newpos); 852 hlist_add_behind_rcu(&policy->bydst, newpos);
860 else 853 else
861 hlist_add_head(&policy->bydst, &n->hhead); 854 hlist_add_head_rcu(&policy->bydst, &n->hhead);
862 855
863 /* paranoia checks follow. 856 /* paranoia checks follow.
864 * Check that the reinserted policy matches at least 857 * Check that the reinserted policy matches at least
@@ -893,12 +886,13 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
893 struct rb_root *new, 886 struct rb_root *new,
894 u16 family) 887 u16 family)
895{ 888{
896 struct rb_node **p, *parent = NULL;
897 struct xfrm_pol_inexact_node *node; 889 struct xfrm_pol_inexact_node *node;
890 struct rb_node **p, *parent;
898 891
899 /* we should not have another subtree here */ 892 /* we should not have another subtree here */
900 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root)); 893 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
901 894restart:
895 parent = NULL;
902 p = &new->rb_node; 896 p = &new->rb_node;
903 while (*p) { 897 while (*p) {
904 u8 prefixlen; 898 u8 prefixlen;
@@ -918,12 +912,11 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
918 } else { 912 } else {
919 struct xfrm_policy *tmp; 913 struct xfrm_policy *tmp;
920 914
921 hlist_for_each_entry(tmp, &node->hhead, bydst) 915 hlist_for_each_entry(tmp, &n->hhead, bydst) {
922 tmp->bydst_reinsert = true;
923 hlist_for_each_entry(tmp, &n->hhead, bydst)
924 tmp->bydst_reinsert = true; 916 tmp->bydst_reinsert = true;
917 hlist_del_rcu(&tmp->bydst);
918 }
925 919
926 INIT_HLIST_HEAD(&node->hhead);
927 xfrm_policy_inexact_list_reinsert(net, node, family); 920 xfrm_policy_inexact_list_reinsert(net, node, family);
928 921
929 if (node->prefixlen == n->prefixlen) { 922 if (node->prefixlen == n->prefixlen) {
@@ -935,8 +928,7 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net,
935 kfree_rcu(n, rcu); 928 kfree_rcu(n, rcu);
936 n = node; 929 n = node;
937 n->prefixlen = prefixlen; 930 n->prefixlen = prefixlen;
938 *p = new->rb_node; 931 goto restart;
939 parent = NULL;
940 } 932 }
941 } 933 }
942 934
@@ -965,12 +957,11 @@ static void xfrm_policy_inexact_node_merge(struct net *net,
965 family); 957 family);
966 } 958 }
967 959
968 hlist_for_each_entry(tmp, &v->hhead, bydst) 960 hlist_for_each_entry(tmp, &v->hhead, bydst) {
969 tmp->bydst_reinsert = true;
970 hlist_for_each_entry(tmp, &n->hhead, bydst)
971 tmp->bydst_reinsert = true; 961 tmp->bydst_reinsert = true;
962 hlist_del_rcu(&tmp->bydst);
963 }
972 964
973 INIT_HLIST_HEAD(&n->hhead);
974 xfrm_policy_inexact_list_reinsert(net, n, family); 965 xfrm_policy_inexact_list_reinsert(net, n, family);
975} 966}
976 967
@@ -1235,6 +1226,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
1235 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); 1226 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1236 1227
1237 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 1228 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1229 write_seqcount_begin(&xfrm_policy_hash_generation);
1238 1230
1239 /* make sure that we can insert the indirect policies again before 1231 /* make sure that we can insert the indirect policies again before
1240 * we start with destructive action. 1232 * we start with destructive action.
@@ -1278,10 +1270,14 @@ static void xfrm_hash_rebuild(struct work_struct *work)
1278 } 1270 }
1279 1271
1280 /* reset the bydst and inexact table in all directions */ 1272 /* reset the bydst and inexact table in all directions */
1281 xfrm_hash_reset_inexact_table(net);
1282
1283 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 1273 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1284 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 1274 struct hlist_node *n;
1275
1276 hlist_for_each_entry_safe(policy, n,
1277 &net->xfrm.policy_inexact[dir],
1278 bydst_inexact_list)
1279 hlist_del_init(&policy->bydst_inexact_list);
1280
1285 hmask = net->xfrm.policy_bydst[dir].hmask; 1281 hmask = net->xfrm.policy_bydst[dir].hmask;
1286 odst = net->xfrm.policy_bydst[dir].table; 1282 odst = net->xfrm.policy_bydst[dir].table;
1287 for (i = hmask; i >= 0; i--) 1283 for (i = hmask; i >= 0; i--)
@@ -1313,6 +1309,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
1313 newpos = NULL; 1309 newpos = NULL;
1314 chain = policy_hash_bysel(net, &policy->selector, 1310 chain = policy_hash_bysel(net, &policy->selector,
1315 policy->family, dir); 1311 policy->family, dir);
1312
1313 hlist_del_rcu(&policy->bydst);
1314
1316 if (!chain) { 1315 if (!chain) {
1317 void *p = xfrm_policy_inexact_insert(policy, dir, 0); 1316 void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1318 1317
@@ -1334,6 +1333,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
1334 1333
1335out_unlock: 1334out_unlock:
1336 __xfrm_policy_inexact_flush(net); 1335 __xfrm_policy_inexact_flush(net);
1336 write_seqcount_end(&xfrm_policy_hash_generation);
1337 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 1337 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1338 1338
1339 mutex_unlock(&hash_resize_mutex); 1339 mutex_unlock(&hash_resize_mutex);
@@ -2600,7 +2600,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2600 dst_copy_metrics(dst1, dst); 2600 dst_copy_metrics(dst1, dst);
2601 2601
2602 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 2602 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2603 __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); 2603 __u32 mark = 0;
2604
2605 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2606 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2604 2607
2605 family = xfrm[i]->props.family; 2608 family = xfrm[i]->props.family;
2606 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, 2609 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 277c1c46fe94..c6d26afcf89d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1488 if (!ut[i].family) 1488 if (!ut[i].family)
1489 ut[i].family = family; 1489 ut[i].family = family;
1490 1490
1491 if ((ut[i].mode == XFRM_MODE_TRANSPORT) && 1491 switch (ut[i].mode) {
1492 (ut[i].family != prev_family)) 1492 case XFRM_MODE_TUNNEL:
1493 return -EINVAL; 1493 case XFRM_MODE_BEET:
1494 1494 break;
1495 default:
1496 if (ut[i].family != prev_family)
1497 return -EINVAL;
1498 break;
1499 }
1495 if (ut[i].mode >= XFRM_MODE_MAX) 1500 if (ut[i].mode >= XFRM_MODE_MAX)
1496 return -EINVAL; 1501 return -EINVAL;
1497 1502
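
The rewritten check encodes the template rule as a switch: TUNNEL and BEET templates may change address family mid-bundle (the outer header re-encapsulates), while every other mode must keep the family of the previous template. Reduced to its core:

    enum xfrm_mode { MODE_TRANSPORT, MODE_TUNNEL, MODE_BEET, MODE_MAX };

    static int check_tmpl_family(enum xfrm_mode mode, int family,
                                 int prev_family)
    {
        switch (mode) {
        case MODE_TUNNEL:
        case MODE_BEET:
            return 0;               /* re-encapsulation: family may change */
        default:
            return family == prev_family ? 0 : -22;   /* -EINVAL */
        }
    }
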
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 08c88de0ffda..11975ec8d566 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -1444,7 +1444,10 @@ check:
1444 new = aa_label_merge(label, target, GFP_KERNEL); 1444 new = aa_label_merge(label, target, GFP_KERNEL);
1445 if (IS_ERR_OR_NULL(new)) { 1445 if (IS_ERR_OR_NULL(new)) {
1446 info = "failed to build target label"; 1446 info = "failed to build target label";
1447 error = PTR_ERR(new); 1447 if (!new)
1448 error = -ENOMEM;
1449 else
1450 error = PTR_ERR(new);
1448 new = NULL; 1451 new = NULL;
1449 perms.allow = 0; 1452 perms.allow = 0;
1450 goto audit; 1453 goto audit;
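
The fix guards against a classic pitfall: IS_ERR_OR_NULL() accepts NULL, but PTR_ERR(NULL) evaluates to 0, which would be reported as success. NULL (an allocation failure) has to be mapped to -ENOMEM explicitly. In isolation:

    #include <errno.h>

    /* Hypothetical ERR_PTR-style decoder: NULL means an allocation
     * failure and must not go through PTR_ERR(), which would yield 0. */
    static long error_from_ptr(const void *p)
    {
        if (!p)
            return -ENOMEM;   /* PTR_ERR(NULL) == 0 would look like success */
        return (long)(unsigned long)p;   /* stand-in for PTR_ERR(p) */
    }
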
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 2c010874329f..8db1731d046a 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -1599,12 +1599,14 @@ static unsigned int apparmor_ipv4_postroute(void *priv,
1599 return apparmor_ip_postroute(priv, skb, state); 1599 return apparmor_ip_postroute(priv, skb, state);
1600} 1600}
1601 1601
1602#if IS_ENABLED(CONFIG_IPV6)
1602static unsigned int apparmor_ipv6_postroute(void *priv, 1603static unsigned int apparmor_ipv6_postroute(void *priv,
1603 struct sk_buff *skb, 1604 struct sk_buff *skb,
1604 const struct nf_hook_state *state) 1605 const struct nf_hook_state *state)
1605{ 1606{
1606 return apparmor_ip_postroute(priv, skb, state); 1607 return apparmor_ip_postroute(priv, skb, state);
1607} 1608}
1609#endif
1608 1610
1609static const struct nf_hook_ops apparmor_nf_ops[] = { 1611static const struct nf_hook_ops apparmor_nf_ops[] = {
1610 { 1612 {
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index a5b09e75e787..f7d2b373da0a 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -541,7 +541,8 @@ static int snd_compress_check_input(struct snd_compr_params *params)
541{ 541{
 542 /* first let's check the buffer parameters */ 542 /* first let's check the buffer parameters */
543 if (params->buffer.fragment_size == 0 || 543 if (params->buffer.fragment_size == 0 ||
544 params->buffer.fragments > INT_MAX / params->buffer.fragment_size) 544 params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
545 params->buffer.fragments == 0)
545 return -EINVAL; 546 return -EINVAL;
546 547
547 /* now codec parameters */ 548 /* now codec parameters */
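Editor's note: the snd_compress_check_input() hunk adds the missing fragments == 0 case to a guard that already rejected a zero fragment_size (which would divide by zero) and an oversized product. A self-contained sketch of the complete validation:

#include <errno.h>
#include <limits.h>
#include <stdio.h>

/* reject fragment geometries that are empty or whose product overflows */
static int check_buffer(unsigned int fragment_size, unsigned int fragments)
{
        if (fragment_size == 0 ||                  /* guards the division below */
            fragments > INT_MAX / fragment_size || /* total must fit in an int */
            fragments == 0)                        /* zero fragments: no buffer */
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("%d\n", check_buffer(4096, 8));            /* 0 */
        printf("%d\n", check_buffer(4096, 0));            /* -22 */
        printf("%d\n", check_buffer(1u << 20, 1u << 12)); /* -22: overflow */
        return 0;
}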
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 40013b26f671..6c99fa8ac5fa 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2112,6 +2112,13 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2112 return 0; 2112 return 0;
2113} 2113}
2114 2114
2115/* allow waiting for a capture stream that hasn't been started */
2116#if IS_ENABLED(CONFIG_SND_PCM_OSS)
2117#define wait_capture_start(substream) ((substream)->oss.oss)
2118#else
2119#define wait_capture_start(substream) false
2120#endif
2121
2115/* the common loop for read/write data */ 2122/* the common loop for read/write data */
2116snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, 2123snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2117 void *data, bool interleaved, 2124 void *data, bool interleaved,
@@ -2182,7 +2189,7 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2182 err = snd_pcm_start(substream); 2189 err = snd_pcm_start(substream);
2183 if (err < 0) 2190 if (err < 0)
2184 goto _end_unlock; 2191 goto _end_unlock;
2185 } else { 2192 } else if (!wait_capture_start(substream)) {
2186 /* nothing to do */ 2193 /* nothing to do */
2187 err = 0; 2194 err = 0;
2188 goto _end_unlock; 2195 goto _end_unlock;
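Editor's note: the wait_capture_start() macro above is a compile-time predicate — with CONFIG_SND_PCM_OSS enabled it inspects the substream's OSS flag, otherwise it folds to false and the new branch vanishes. A toy version of the same trick (CONFIG_SND_PCM_OSS is modeled as a plain preprocessor constant here):

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SND_PCM_OSS 1

struct substream { struct { bool oss; } oss; };

/* with the option off, the predicate is constant false and the
 * compiler drops the dependent branch entirely */
#if CONFIG_SND_PCM_OSS
#define wait_capture_start(s) ((s)->oss.oss)
#else
#define wait_capture_start(s) false
#endif

int main(void)
{
        struct substream s = { .oss = { true } };

        if (!wait_capture_start(&s))
                puts("nothing to do");
        else
                puts("wait for data even though the stream is not started");
        return 0;
}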
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 9174f1b3a987..1ec706ced75c 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -115,7 +115,8 @@ static int hda_codec_driver_probe(struct device *dev)
115 err = snd_hda_codec_build_controls(codec); 115 err = snd_hda_codec_build_controls(codec);
116 if (err < 0) 116 if (err < 0)
117 goto error_module; 117 goto error_module;
118 if (codec->card->registered) { 118 /* only register after the bus probe has finished; otherwise it's racy */
119 if (!codec->bus->bus_probing && codec->card->registered) {
119 err = snd_card_register(codec->card); 120 err = snd_card_register(codec->card);
120 if (err < 0) 121 if (err < 0)
121 goto error_module; 122 goto error_module;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e784130ea4e0..e5c49003e75f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2185,6 +2185,7 @@ static int azx_probe_continue(struct azx *chip)
2185 int dev = chip->dev_index; 2185 int dev = chip->dev_index;
2186 int err; 2186 int err;
2187 2187
2188 to_hda_bus(bus)->bus_probing = 1;
2188 hda->probe_continued = 1; 2189 hda->probe_continued = 1;
2189 2190
2190 /* bind with i915 if needed */ 2191 /* bind with i915 if needed */
@@ -2269,6 +2270,7 @@ out_free:
2269 if (err < 0) 2270 if (err < 0)
2270 hda->init_failed = 1; 2271 hda->init_failed = 1;
2271 complete_all(&hda->probe_wait); 2272 complete_all(&hda->probe_wait);
2273 to_hda_bus(bus)->bus_probing = 0;
2272 return err; 2274 return err;
2273} 2275}
2274 2276
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index e5bdbc245682..29882bda7632 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -8451,8 +8451,10 @@ static void ca0132_free(struct hda_codec *codec)
8451 ca0132_exit_chip(codec); 8451 ca0132_exit_chip(codec);
8452 8452
8453 snd_hda_power_down(codec); 8453 snd_hda_power_down(codec);
8454 if (IS_ENABLED(CONFIG_PCI) && spec->mem_base) 8454#ifdef CONFIG_PCI
8455 if (spec->mem_base)
8455 pci_iounmap(codec->bus->pci, spec->mem_base); 8456 pci_iounmap(codec->bus->pci, spec->mem_base);
8457#endif
8456 kfree(spec->spec_init_verbs); 8458 kfree(spec->spec_init_verbs);
8457 kfree(codec->spec); 8459 kfree(codec->spec);
8458} 8460}
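Editor's note: the ca0132_free() change swaps IS_ENABLED() for a real #ifdef — IS_ENABLED() leaves the call compiled in, which breaks the build when pci_iounmap() is not even declared for CONFIG_PCI=n, whereas the preprocessor guard removes the reference entirely. A compilable toy showing the difference (CONFIG_FOO and foo_teardown() are made up for the demo):

#include <stdio.h>

#define CONFIG_FOO 0    /* flip to 1 to compile the teardown path in */

#if CONFIG_FOO
static void foo_teardown(void) { puts("torn down"); }
#endif

int main(void)
{
        /*
         * The runtime form
         *      if (IS_ENABLED(CONFIG_FOO))
         *              foo_teardown();
         * still has to parse and link foo_teardown() even when the option
         * is off; when the declaration itself is conditional, only the
         * preprocessor guard below builds in both configurations.
         */
#if CONFIG_FOO
        foo_teardown();
#endif
        puts("done");
        return 0;
}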
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 51cc6589443f..152f54137082 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -931,6 +931,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
931 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), 931 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
932 SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), 932 SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
933 SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), 933 SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
934 SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
934 SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), 935 SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
935 SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), 936 SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
936 SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), 937 SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0b3e7a18ca78..6df758adff84 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -117,6 +117,7 @@ struct alc_spec {
117 int codec_variant; /* flag for other variants */ 117 int codec_variant; /* flag for other variants */
118 unsigned int has_alc5505_dsp:1; 118 unsigned int has_alc5505_dsp:1;
119 unsigned int no_depop_delay:1; 119 unsigned int no_depop_delay:1;
120 unsigned int done_hp_init:1;
120 121
121 /* for PLL fix */ 122 /* for PLL fix */
122 hda_nid_t pll_nid; 123 hda_nid_t pll_nid;
@@ -514,6 +515,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
514 } 515 }
515} 516}
516 517
518/* get a primary headphone pin if available */
519static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
520{
521 if (spec->gen.autocfg.hp_pins[0])
522 return spec->gen.autocfg.hp_pins[0];
523 if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
524 return spec->gen.autocfg.line_out_pins[0];
525 return 0;
526}
517 527
518/* 528/*
519 * Realtek SSID verification 529 * Realtek SSID verification
@@ -724,9 +734,7 @@ do_sku:
724 * 15 : 1 --> enable the function "Mute internal speaker 734 * 15 : 1 --> enable the function "Mute internal speaker
725 * when the external headphone out jack is plugged" 735 * when the external headphone out jack is plugged"
726 */ 736 */
727 if (!spec->gen.autocfg.hp_pins[0] && 737 if (!alc_get_hp_pin(spec)) {
728 !(spec->gen.autocfg.line_out_pins[0] &&
729 spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
730 hda_nid_t nid; 738 hda_nid_t nid;
731 tmp = (ass >> 11) & 0x3; /* HP to chassis */ 739 tmp = (ass >> 11) & 0x3; /* HP to chassis */
732 nid = ports[tmp]; 740 nid = ports[tmp];
@@ -2958,7 +2966,7 @@ static void alc282_restore_default_value(struct hda_codec *codec)
2958static void alc282_init(struct hda_codec *codec) 2966static void alc282_init(struct hda_codec *codec)
2959{ 2967{
2960 struct alc_spec *spec = codec->spec; 2968 struct alc_spec *spec = codec->spec;
2961 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 2969 hda_nid_t hp_pin = alc_get_hp_pin(spec);
2962 bool hp_pin_sense; 2970 bool hp_pin_sense;
2963 int coef78; 2971 int coef78;
2964 2972
@@ -2995,7 +3003,7 @@ static void alc282_init(struct hda_codec *codec)
2995static void alc282_shutup(struct hda_codec *codec) 3003static void alc282_shutup(struct hda_codec *codec)
2996{ 3004{
2997 struct alc_spec *spec = codec->spec; 3005 struct alc_spec *spec = codec->spec;
2998 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3006 hda_nid_t hp_pin = alc_get_hp_pin(spec);
2999 bool hp_pin_sense; 3007 bool hp_pin_sense;
3000 int coef78; 3008 int coef78;
3001 3009
@@ -3073,14 +3081,9 @@ static void alc283_restore_default_value(struct hda_codec *codec)
3073static void alc283_init(struct hda_codec *codec) 3081static void alc283_init(struct hda_codec *codec)
3074{ 3082{
3075 struct alc_spec *spec = codec->spec; 3083 struct alc_spec *spec = codec->spec;
3076 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3084 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3077 bool hp_pin_sense; 3085 bool hp_pin_sense;
3078 3086
3079 if (!spec->gen.autocfg.hp_outs) {
3080 if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
3081 hp_pin = spec->gen.autocfg.line_out_pins[0];
3082 }
3083
3084 alc283_restore_default_value(codec); 3087 alc283_restore_default_value(codec);
3085 3088
3086 if (!hp_pin) 3089 if (!hp_pin)
@@ -3114,14 +3117,9 @@ static void alc283_init(struct hda_codec *codec)
3114static void alc283_shutup(struct hda_codec *codec) 3117static void alc283_shutup(struct hda_codec *codec)
3115{ 3118{
3116 struct alc_spec *spec = codec->spec; 3119 struct alc_spec *spec = codec->spec;
3117 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3120 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3118 bool hp_pin_sense; 3121 bool hp_pin_sense;
3119 3122
3120 if (!spec->gen.autocfg.hp_outs) {
3121 if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
3122 hp_pin = spec->gen.autocfg.line_out_pins[0];
3123 }
3124
3125 if (!hp_pin) { 3123 if (!hp_pin) {
3126 alc269_shutup(codec); 3124 alc269_shutup(codec);
3127 return; 3125 return;
@@ -3155,7 +3153,7 @@ static void alc283_shutup(struct hda_codec *codec)
3155static void alc256_init(struct hda_codec *codec) 3153static void alc256_init(struct hda_codec *codec)
3156{ 3154{
3157 struct alc_spec *spec = codec->spec; 3155 struct alc_spec *spec = codec->spec;
3158 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3156 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3159 bool hp_pin_sense; 3157 bool hp_pin_sense;
3160 3158
3161 if (!hp_pin) 3159 if (!hp_pin)
@@ -3191,7 +3189,7 @@ static void alc256_init(struct hda_codec *codec)
3191static void alc256_shutup(struct hda_codec *codec) 3189static void alc256_shutup(struct hda_codec *codec)
3192{ 3190{
3193 struct alc_spec *spec = codec->spec; 3191 struct alc_spec *spec = codec->spec;
3194 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3192 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3195 bool hp_pin_sense; 3193 bool hp_pin_sense;
3196 3194
3197 if (!hp_pin) { 3195 if (!hp_pin) {
@@ -3227,7 +3225,7 @@ static void alc256_shutup(struct hda_codec *codec)
3227static void alc225_init(struct hda_codec *codec) 3225static void alc225_init(struct hda_codec *codec)
3228{ 3226{
3229 struct alc_spec *spec = codec->spec; 3227 struct alc_spec *spec = codec->spec;
3230 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3228 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3231 bool hp1_pin_sense, hp2_pin_sense; 3229 bool hp1_pin_sense, hp2_pin_sense;
3232 3230
3233 if (!hp_pin) 3231 if (!hp_pin)
@@ -3270,7 +3268,7 @@ static void alc225_init(struct hda_codec *codec)
3270static void alc225_shutup(struct hda_codec *codec) 3268static void alc225_shutup(struct hda_codec *codec)
3271{ 3269{
3272 struct alc_spec *spec = codec->spec; 3270 struct alc_spec *spec = codec->spec;
3273 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3271 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3274 bool hp1_pin_sense, hp2_pin_sense; 3272 bool hp1_pin_sense, hp2_pin_sense;
3275 3273
3276 if (!hp_pin) { 3274 if (!hp_pin) {
@@ -3314,7 +3312,7 @@ static void alc225_shutup(struct hda_codec *codec)
3314static void alc_default_init(struct hda_codec *codec) 3312static void alc_default_init(struct hda_codec *codec)
3315{ 3313{
3316 struct alc_spec *spec = codec->spec; 3314 struct alc_spec *spec = codec->spec;
3317 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3315 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3318 bool hp_pin_sense; 3316 bool hp_pin_sense;
3319 3317
3320 if (!hp_pin) 3318 if (!hp_pin)
@@ -3343,7 +3341,7 @@ static void alc_default_init(struct hda_codec *codec)
3343static void alc_default_shutup(struct hda_codec *codec) 3341static void alc_default_shutup(struct hda_codec *codec)
3344{ 3342{
3345 struct alc_spec *spec = codec->spec; 3343 struct alc_spec *spec = codec->spec;
3346 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 3344 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3347 bool hp_pin_sense; 3345 bool hp_pin_sense;
3348 3346
3349 if (!hp_pin) { 3347 if (!hp_pin) {
@@ -3372,6 +3370,48 @@ static void alc_default_shutup(struct hda_codec *codec)
3372 snd_hda_shutup_pins(codec); 3370 snd_hda_shutup_pins(codec);
3373} 3371}
3374 3372
3373static void alc294_hp_init(struct hda_codec *codec)
3374{
3375 struct alc_spec *spec = codec->spec;
3376 hda_nid_t hp_pin = alc_get_hp_pin(spec);
3377 int i, val;
3378
3379 if (!hp_pin)
3380 return;
3381
3382 snd_hda_codec_write(codec, hp_pin, 0,
3383 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
3384
3385 msleep(100);
3386
3387 snd_hda_codec_write(codec, hp_pin, 0,
3388 AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
3389
3390 alc_update_coef_idx(codec, 0x6f, 0x000f, 0); /* Set HP depop to manual mode */
3391 alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
3392
3393 /* Wait for the depop procedure to finish */
3394 val = alc_read_coefex_idx(codec, 0x58, 0x01);
3395 for (i = 0; i < 20 && val & 0x0080; i++) {
3396 msleep(50);
3397 val = alc_read_coefex_idx(codec, 0x58, 0x01);
3398 }
3399 /* Set HP depop to auto mode */
3400 alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
3401 msleep(50);
3402}
3403
3404static void alc294_init(struct hda_codec *codec)
3405{
3406 struct alc_spec *spec = codec->spec;
3407
3408 if (!spec->done_hp_init) {
3409 alc294_hp_init(codec);
3410 spec->done_hp_init = true;
3411 }
3412 alc_default_init(codec);
3413}
3414
3375static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg, 3415static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
3376 unsigned int val) 3416 unsigned int val)
3377{ 3417{
@@ -4737,7 +4777,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
4737 struct alc_spec *spec = codec->spec; 4777 struct alc_spec *spec = codec->spec;
4738 4778
4739 hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]]; 4779 hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
4740 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; 4780 hda_nid_t hp_pin = alc_get_hp_pin(spec);
4741 4781
4742 int new_headset_mode; 4782 int new_headset_mode;
4743 4783
@@ -5016,7 +5056,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
5016static void alc_shutup_dell_xps13(struct hda_codec *codec) 5056static void alc_shutup_dell_xps13(struct hda_codec *codec)
5017{ 5057{
5018 struct alc_spec *spec = codec->spec; 5058 struct alc_spec *spec = codec->spec;
5019 int hp_pin = spec->gen.autocfg.hp_pins[0]; 5059 int hp_pin = alc_get_hp_pin(spec);
5020 5060
5021 /* Prevent pop noises when headphones are plugged in */ 5061 /* Prevent pop noises when headphones are plugged in */
5022 snd_hda_codec_write(codec, hp_pin, 0, 5062 snd_hda_codec_write(codec, hp_pin, 0,
@@ -5109,7 +5149,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
5109 5149
5110 if (action == HDA_FIXUP_ACT_PROBE) { 5150 if (action == HDA_FIXUP_ACT_PROBE) {
5111 int mic_pin = find_ext_mic_pin(codec); 5151 int mic_pin = find_ext_mic_pin(codec);
5112 int hp_pin = spec->gen.autocfg.hp_pins[0]; 5152 int hp_pin = alc_get_hp_pin(spec);
5113 5153
5114 if (snd_BUG_ON(!mic_pin || !hp_pin)) 5154 if (snd_BUG_ON(!mic_pin || !hp_pin))
5115 return; 5155 return;
@@ -5591,6 +5631,7 @@ enum {
5591 ALC294_FIXUP_ASUS_HEADSET_MIC, 5631 ALC294_FIXUP_ASUS_HEADSET_MIC,
5592 ALC294_FIXUP_ASUS_SPK, 5632 ALC294_FIXUP_ASUS_SPK,
5593 ALC225_FIXUP_HEADSET_JACK, 5633 ALC225_FIXUP_HEADSET_JACK,
5634 ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
5594}; 5635};
5595 5636
5596static const struct hda_fixup alc269_fixups[] = { 5637static const struct hda_fixup alc269_fixups[] = {
@@ -6537,6 +6578,15 @@ static const struct hda_fixup alc269_fixups[] = {
6537 .type = HDA_FIXUP_FUNC, 6578 .type = HDA_FIXUP_FUNC,
6538 .v.func = alc_fixup_headset_jack, 6579 .v.func = alc_fixup_headset_jack,
6539 }, 6580 },
6581 [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
6582 .type = HDA_FIXUP_PINS,
6583 .v.pins = (const struct hda_pintbl[]) {
6584 { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
6585 { }
6586 },
6587 .chained = true,
6588 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6589 },
6540}; 6590};
6541 6591
6542static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6592static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6715,6 +6765,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6715 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), 6765 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
6716 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), 6766 SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
6717 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), 6767 SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
6768 SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
6718 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), 6769 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
6719 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 6770 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
6720 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), 6771 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -6926,7 +6977,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
6926 {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, 6977 {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"},
6927 {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, 6978 {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"},
6928 {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, 6979 {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"},
6929 {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"}, 6980 {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"},
6930 {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, 6981 {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"},
6931 {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, 6982 {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"},
6932 {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, 6983 {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"},
@@ -7373,37 +7424,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
7373 alc_update_coef_idx(codec, 0x4, 0, 1<<11); 7424 alc_update_coef_idx(codec, 0x4, 0, 1<<11);
7374} 7425}
7375 7426
7376static void alc294_hp_init(struct hda_codec *codec)
7377{
7378 struct alc_spec *spec = codec->spec;
7379 hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
7380 int i, val;
7381
7382 if (!hp_pin)
7383 return;
7384
7385 snd_hda_codec_write(codec, hp_pin, 0,
7386 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
7387
7388 msleep(100);
7389
7390 snd_hda_codec_write(codec, hp_pin, 0,
7391 AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
7392
7393 alc_update_coef_idx(codec, 0x6f, 0x000f, 0); /* Set HP depop to manual mode */
7394 alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
7395
7396 /* Wait for the depop procedure to finish */
7397 val = alc_read_coefex_idx(codec, 0x58, 0x01);
7398 for (i = 0; i < 20 && val & 0x0080; i++) {
7399 msleep(50);
7400 val = alc_read_coefex_idx(codec, 0x58, 0x01);
7401 }
7402 /* Set HP depop to auto mode */
7403 alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
7404 msleep(50);
7405}
7406
7407/* 7427/*
7408 */ 7428 */
7409static int patch_alc269(struct hda_codec *codec) 7429static int patch_alc269(struct hda_codec *codec)
@@ -7529,7 +7549,7 @@ static int patch_alc269(struct hda_codec *codec)
7529 spec->codec_variant = ALC269_TYPE_ALC294; 7549 spec->codec_variant = ALC269_TYPE_ALC294;
7530 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ 7550 spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
7531 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ 7551 alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
7532 alc294_hp_init(codec); 7552 spec->init_hook = alc294_init;
7533 break; 7553 break;
7534 case 0x10ec0300: 7554 case 0x10ec0300:
7535 spec->codec_variant = ALC269_TYPE_ALC300; 7555 spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7541,7 +7561,7 @@ static int patch_alc269(struct hda_codec *codec)
7541 spec->codec_variant = ALC269_TYPE_ALC700; 7561 spec->codec_variant = ALC269_TYPE_ALC700;
7542 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ 7562 spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
7543 alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ 7563 alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
7544 alc294_hp_init(codec); 7564 spec->init_hook = alc294_init;
7545 break; 7565 break;
7546 7566
7547 } 7567 }
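Editor's note: two things happen across the patch_realtek.c hunks above — the repeated hp_pins[0]-with-line-out-fallback lookup is folded into alc_get_hp_pin(), and alc294_hp_init() moves from probe time into an init hook gated by done_hp_init so the lengthy depop sequence runs once per codec rather than on every init. A userspace sketch of the fallback helper alone (types and jack constants are stand-ins):

#include <stdio.h>

typedef unsigned short hda_nid_t;

enum { JACK_LINE_OUT, JACK_HP_OUT };

struct autocfg {
        hda_nid_t hp_pins[5];
        hda_nid_t line_out_pins[5];
        int line_out_type;
};

/* primary headphone pin, falling back to a line-out wired as HP */
static hda_nid_t get_hp_pin(const struct autocfg *cfg)
{
        if (cfg->hp_pins[0])
                return cfg->hp_pins[0];
        if (cfg->line_out_type == JACK_HP_OUT)
                return cfg->line_out_pins[0];
        return 0;
}

int main(void)
{
        struct autocfg a = { .hp_pins = { 0x21 } };
        struct autocfg b = { .line_out_pins = { 0x17 },
                             .line_out_type = JACK_HP_OUT };

        printf("0x%x 0x%x\n", get_hp_pin(&a), get_hp_pin(&b)); /* 0x21 0x17 */
        return 0;
}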
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index 022a8912c8a2..3d58338fa3cf 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -611,14 +611,16 @@ static int acp3x_audio_probe(struct platform_device *pdev)
611 } 611 }
612 irqflags = *((unsigned int *)(pdev->dev.platform_data)); 612 irqflags = *((unsigned int *)(pdev->dev.platform_data));
613 613
614 adata = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dev_data),
615 GFP_KERNEL);
616 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 614 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
617 if (!res) { 615 if (!res) {
618 dev_err(&pdev->dev, "IORESOURCE_MEM FAILED\n"); 616 dev_err(&pdev->dev, "IORESOURCE_MEM FAILED\n");
619 return -ENODEV; 617 return -ENODEV;
620 } 618 }
621 619
620 adata = devm_kzalloc(&pdev->dev, sizeof(*adata), GFP_KERNEL);
621 if (!adata)
622 return -ENOMEM;
623
622 adata->acp3x_base = devm_ioremap(&pdev->dev, res->start, 624 adata->acp3x_base = devm_ioremap(&pdev->dev, res->start,
623 resource_size(res)); 625 resource_size(res));
624 626
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 3ab2949c1dfa..b19d7a3e7a2c 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1890,51 +1890,31 @@ static void hdmi_codec_remove(struct snd_soc_component *component)
1890 pm_runtime_disable(&hdev->dev); 1890 pm_runtime_disable(&hdev->dev);
1891} 1891}
1892 1892
1893#ifdef CONFIG_PM 1893#ifdef CONFIG_PM_SLEEP
1894static int hdmi_codec_prepare(struct device *dev) 1894static int hdmi_codec_resume(struct device *dev)
1895{
1896 struct hdac_device *hdev = dev_to_hdac_dev(dev);
1897
1898 pm_runtime_get_sync(&hdev->dev);
1899
1900 /*
1901 * Power down afg.
1902 * codec_read is preferred over codec_write to set the power state.
1903 * This way verb is send to set the power state and response
1904 * is received. So setting power state is ensured without using loop
1905 * to read the state.
1906 */
1907 snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
1908 AC_PWRST_D3);
1909
1910 return 0;
1911}
1912
1913static void hdmi_codec_complete(struct device *dev)
1914{ 1895{
1915 struct hdac_device *hdev = dev_to_hdac_dev(dev); 1896 struct hdac_device *hdev = dev_to_hdac_dev(dev);
1916 struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev); 1897 struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
1898 int ret;
1917 1899
1918 /* Power up afg */ 1900 ret = pm_runtime_force_resume(dev);
1919 snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE, 1901 if (ret < 0)
1920 AC_PWRST_D0); 1902 return ret;
1921
1922 hdac_hdmi_skl_enable_all_pins(hdev);
1923 hdac_hdmi_skl_enable_dp12(hdev);
1924
1925 /* 1903 /*
1926 * Since the ELD notify callback request is not entertained while the 1904 * Since the ELD notify callback request is not entertained while the
1927 * device is in suspend state, we need to manually check detection of 1905 * device is in suspend state, we need to manually check detection of
1928 * all pins here. Pin capability change is not supported, so use the 1906 * all pins here. Pin capability change is not supported, so use the
1929 * already set pin caps. 1907 * already set pin caps.
1908 *
1909 * NOTE: this is safe to call even if the codec doesn't actually resume.
1910 * The pin check involves only the DRM audio component hooks, so it
1911 * works even if the HD-audio side is still dreaming peacefully.
1930 */ 1912 */
1931 hdac_hdmi_present_sense_all_pins(hdev, hdmi, false); 1913 hdac_hdmi_present_sense_all_pins(hdev, hdmi, false);
1932 1914 return 0;
1933 pm_runtime_put_sync(&hdev->dev);
1934} 1915}
1935#else 1916#else
1936#define hdmi_codec_prepare NULL 1917#define hdmi_codec_resume NULL
1937#define hdmi_codec_complete NULL
1938#endif 1918#endif
1939 1919
1940static const struct snd_soc_component_driver hdmi_hda_codec = { 1920static const struct snd_soc_component_driver hdmi_hda_codec = {
@@ -2135,75 +2115,6 @@ static int hdac_hdmi_dev_remove(struct hdac_device *hdev)
2135} 2115}
2136 2116
2137#ifdef CONFIG_PM 2117#ifdef CONFIG_PM
2138/*
2139 * Power management sequences
2140 * ==========================
2141 *
2142 * The following explains the PM handling of HDAC HDMI with its parent
2143 * device SKL and display power usage
2144 *
2145 * Probe
2146 * -----
2147 * In SKL probe,
2148 * 1. skl_probe_work() powers up the display (refcount++ -> 1)
2149 * 2. enumerates the codecs on the link
2150 * 3. powers down the display (refcount-- -> 0)
2151 *
2152 * In HDAC HDMI probe,
2153 * 1. hdac_hdmi_dev_probe() powers up the display (refcount++ -> 1)
2154 * 2. probe the codec
2155 * 3. put the HDAC HDMI device to runtime suspend
2156 * 4. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
2157 *
2158 * Once children are runtime suspended, SKL device also goes to runtime
2159 * suspend
2160 *
2161 * HDMI Playback
2162 * -------------
2163 * Open HDMI device,
2164 * 1. skl_runtime_resume() invoked
2165 * 2. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
2166 *
2167 * Close HDMI device,
2168 * 1. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
2169 * 2. skl_runtime_suspend() invoked
2170 *
2171 * S0/S3 Cycle with playback in progress
2172 * -------------------------------------
2173 * When the device is opened for playback, the device is runtime active
2174 * already and the display refcount is 1 as explained above.
2175 *
2176 * Entering to S3,
2177 * 1. hdmi_codec_prepare() invoke the runtime resume of codec which just
2178 * increments the PM runtime usage count of the codec since the device
2179 * is in use already
2180 * 2. skl_suspend() powers down the display (refcount-- -> 0)
2181 *
2182 * Wakeup from S3,
2183 * 1. skl_resume() powers up the display (refcount++ -> 1)
2184 * 2. hdmi_codec_complete() invokes the runtime suspend of codec which just
2185 * decrements the PM runtime usage count of the codec since the device
2186 * is in use already
2187 *
2188 * Once playback is stopped, the display refcount is set to 0 as explained
2189 * above in the HDMI playback sequence. The PM handlings are designed in
2190 * such way that to balance the refcount of display power when the codec
2191 * device put to S3 while playback is going on.
2192 *
2193 * S0/S3 Cycle without playback in progress
2194 * ----------------------------------------
2195 * Entering to S3,
2196 * 1. hdmi_codec_prepare() invoke the runtime resume of codec
2197 * 2. skl_runtime_resume() invoked
2198 * 3. hdac_hdmi_runtime_resume() powers up the display (refcount++ -> 1)
2199 * 4. skl_suspend() powers down the display (refcount-- -> 0)
2200 *
2201 * Wakeup from S3,
2202 * 1. skl_resume() powers up the display (refcount++ -> 1)
2203 * 2. hdmi_codec_complete() invokes the runtime suspend of codec
2204 * 3. hdac_hdmi_runtime_suspend() powers down the display (refcount-- -> 0)
2205 * 4. skl_runtime_suspend() invoked
2206 */
2207static int hdac_hdmi_runtime_suspend(struct device *dev) 2118static int hdac_hdmi_runtime_suspend(struct device *dev)
2208{ 2119{
2209 struct hdac_device *hdev = dev_to_hdac_dev(dev); 2120 struct hdac_device *hdev = dev_to_hdac_dev(dev);
@@ -2277,8 +2188,7 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
2277 2188
2278static const struct dev_pm_ops hdac_hdmi_pm = { 2189static const struct dev_pm_ops hdac_hdmi_pm = {
2279 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) 2190 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
2280 .prepare = hdmi_codec_prepare, 2191 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, hdmi_codec_resume)
2281 .complete = hdmi_codec_complete,
2282}; 2192};
2283 2193
2284static const struct hda_device_id hdmi_list[] = { 2194static const struct hda_device_id hdmi_list[] = {
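Editor's note: the hdac_hdmi PM rework above drops the hand-rolled prepare/complete pair and routes system sleep through the runtime-PM callbacks via pm_runtime_force_suspend()/pm_runtime_force_resume(), with the resume side re-running the pin presence check. A userspace model of that delegation using plain function pointers (the real kernel helpers are named in the comments):

#include <stdio.h>

struct dev_pm_ops {
        int (*runtime_suspend)(void);
        int (*runtime_resume)(void);
        int (*suspend)(void);           /* system-sleep entry points */
        int (*resume)(void);
};

static int rt_suspend(void) { puts("runtime suspend: codec to D3"); return 0; }
static int rt_resume(void)  { puts("runtime resume: codec to D0");  return 0; }

/* system resume rides on the runtime path, then refreshes jack state */
static int sys_resume(void)
{
        int ret = rt_resume();  /* models pm_runtime_force_resume() */

        if (ret < 0)
                return ret;
        puts("present_sense_all_pins: re-check ELD/jack state");
        return 0;
}

static const struct dev_pm_ops pm = {
        .runtime_suspend = rt_suspend,
        .runtime_resume  = rt_resume,
        .suspend         = rt_suspend,  /* models pm_runtime_force_suspend() */
        .resume          = sys_resume,
};

int main(void)
{
        pm.suspend();   /* S3 entry */
        pm.resume();    /* S3 exit */
        return 0;
}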
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 6cb1653be804..4cc24a5d5c31 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1400,24 +1400,20 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
1400 if (ret != 0) { 1400 if (ret != 0) {
1401 dev_err(component->dev, 1401 dev_err(component->dev,
1402 "Failed to set digital mute: %d\n", ret); 1402 "Failed to set digital mute: %d\n", ret);
1403 mutex_unlock(&pcm512x->mutex); 1403 goto unlock;
1404 return ret;
1405 } 1404 }
1406 1405
1407 regmap_read_poll_timeout(pcm512x->regmap, 1406 regmap_read_poll_timeout(pcm512x->regmap,
1408 PCM512x_ANALOG_MUTE_DET, 1407 PCM512x_ANALOG_MUTE_DET,
1409 mute_det, (mute_det & 0x3) == 0, 1408 mute_det, (mute_det & 0x3) == 0,
1410 200, 10000); 1409 200, 10000);
1411
1412 mutex_unlock(&pcm512x->mutex);
1413 } else { 1410 } else {
1414 pcm512x->mute &= ~0x1; 1411 pcm512x->mute &= ~0x1;
1415 ret = pcm512x_update_mute(pcm512x); 1412 ret = pcm512x_update_mute(pcm512x);
1416 if (ret != 0) { 1413 if (ret != 0) {
1417 dev_err(component->dev, 1414 dev_err(component->dev,
1418 "Failed to update digital mute: %d\n", ret); 1415 "Failed to update digital mute: %d\n", ret);
1419 mutex_unlock(&pcm512x->mutex); 1416 goto unlock;
1420 return ret;
1421 } 1417 }
1422 1418
1423 regmap_read_poll_timeout(pcm512x->regmap, 1419 regmap_read_poll_timeout(pcm512x->regmap,
@@ -1428,9 +1424,10 @@ static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
1428 200, 10000); 1424 200, 10000);
1429 } 1425 }
1430 1426
1427unlock:
1431 mutex_unlock(&pcm512x->mutex); 1428 mutex_unlock(&pcm512x->mutex);
1432 1429
1433 return 0; 1430 return ret;
1434} 1431}
1435 1432
1436static const struct snd_soc_dai_ops pcm512x_dai_ops = { 1433static const struct snd_soc_dai_ops pcm512x_dai_ops = {
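Editor's note: the pcm512x_digital_mute() hunk fixes two things at once — the duplicated unlock-then-return exits collapse into one unlock label, and the function now propagates ret instead of always returning 0. A compact standalone version of the pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int update_mute(int fail) { return fail ? -5 : 0; }

/* single unlock site: every failure path funnels through one label,
 * and the final return propagates ret instead of a hard-coded 0 */
static int digital_mute(int fail)
{
        int ret;

        pthread_mutex_lock(&lock);
        ret = update_mute(fail);
        if (ret != 0)
                goto unlock;    /* no duplicated unlock-then-return */
        /* the real driver polls the mute-detect bits here */
unlock:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("%d %d\n", digital_mute(0), digital_mute(1));    /* 0 -5 */
        return 0;
}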
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index 0ef966d56bac..e2855ab9a2c6 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -1128,8 +1128,11 @@ static int rt274_i2c_probe(struct i2c_client *i2c,
1128 return ret; 1128 return ret;
1129 } 1129 }
1130 1130
1131 regmap_read(rt274->regmap, 1131 ret = regmap_read(rt274->regmap,
1132 RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val); 1132 RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
1133 if (ret)
1134 return ret;
1135
1133 if (val != RT274_VENDOR_ID) { 1136 if (val != RT274_VENDOR_ID) {
1134 dev_err(&i2c->dev, 1137 dev_err(&i2c->dev,
1135 "Device with ID register %#x is not rt274\n", val); 1138 "Device with ID register %#x is not rt274\n", val);
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 4d46f4567c3a..bec2eefa8b0f 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -280,6 +280,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component)
280 280
281 rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp), 281 rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp),
282 GFP_KERNEL); 282 GFP_KERNEL);
283 if (!rt5514_dsp)
284 return -ENOMEM;
283 285
284 rt5514_dsp->dev = &rt5514_spi->dev; 286 rt5514_dsp->dev = &rt5514_spi->dev;
285 mutex_init(&rt5514_dsp->dma_lock); 287 mutex_init(&rt5514_dsp->dma_lock);
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 34cfaf8f6f34..89c43b26c379 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -2512,6 +2512,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
2512 regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000); 2512 regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000);
2513 regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000); 2513 regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000);
2514 regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005); 2514 regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
2515 regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
2515 2516
2516 mutex_unlock(&rt5682->calibrate_mutex); 2517 mutex_unlock(&rt5682->calibrate_mutex);
2517 2518
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index d82a8301fd74..96944cff0ed7 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -849,18 +849,18 @@
849#define RT5682_SCLK_SRC_PLL2 (0x2 << 13) 849#define RT5682_SCLK_SRC_PLL2 (0x2 << 13)
850#define RT5682_SCLK_SRC_SDW (0x3 << 13) 850#define RT5682_SCLK_SRC_SDW (0x3 << 13)
851#define RT5682_SCLK_SRC_RCCLK (0x4 << 13) 851#define RT5682_SCLK_SRC_RCCLK (0x4 << 13)
852#define RT5682_PLL1_SRC_MASK (0x3 << 10) 852#define RT5682_PLL2_SRC_MASK (0x3 << 10)
853#define RT5682_PLL1_SRC_SFT 10 853#define RT5682_PLL2_SRC_SFT 10
854#define RT5682_PLL1_SRC_MCLK (0x0 << 10) 854#define RT5682_PLL2_SRC_MCLK (0x0 << 10)
855#define RT5682_PLL1_SRC_BCLK1 (0x1 << 10) 855#define RT5682_PLL2_SRC_BCLK1 (0x1 << 10)
856#define RT5682_PLL1_SRC_SDW (0x2 << 10) 856#define RT5682_PLL2_SRC_SDW (0x2 << 10)
857#define RT5682_PLL1_SRC_RC (0x3 << 10) 857#define RT5682_PLL2_SRC_RC (0x3 << 10)
858#define RT5682_PLL2_SRC_MASK (0x3 << 8) 858#define RT5682_PLL1_SRC_MASK (0x3 << 8)
859#define RT5682_PLL2_SRC_SFT 8 859#define RT5682_PLL1_SRC_SFT 8
860#define RT5682_PLL2_SRC_MCLK (0x0 << 8) 860#define RT5682_PLL1_SRC_MCLK (0x0 << 8)
861#define RT5682_PLL2_SRC_BCLK1 (0x1 << 8) 861#define RT5682_PLL1_SRC_BCLK1 (0x1 << 8)
862#define RT5682_PLL2_SRC_SDW (0x2 << 8) 862#define RT5682_PLL1_SRC_SDW (0x2 << 8)
863#define RT5682_PLL2_SRC_RC (0x3 << 8) 863#define RT5682_PLL1_SRC_RC (0x3 << 8)
864 864
865 865
866 866
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index e2b5a11b16d1..f03195d2ab2e 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component,
822 case SND_SOC_BIAS_PREPARE: 822 case SND_SOC_BIAS_PREPARE:
823 break; 823 break;
824 case SND_SOC_BIAS_STANDBY: 824 case SND_SOC_BIAS_STANDBY:
825 /* Initial cold start */
826 if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF)
827 break;
828
825 /* Switch off BCLK_N Divider */ 829 /* Switch off BCLK_N Divider */
826 snd_soc_component_update_bits(component, AIC32X4_BCLKN, 830 snd_soc_component_update_bits(component, AIC32X4_BCLKN,
827 AIC32X4_BCLKEN, 0); 831 AIC32X4_BCLKEN, 0);
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 392d5eef356d..99e07b01a2ce 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
86 if (!buf) 86 if (!buf)
87 return -ENOMEM; 87 return -ENOMEM;
88 88
89 ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n", 89 ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
90 pdcr, ptcr); 90 pdcr, ptcr);
91 91
92 if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR) 92 if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR)
93 ret += snprintf(buf + ret, PAGE_SIZE - ret, 93 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
94 "TxFS output from %s, ", 94 "TxFS output from %s, ",
95 audmux_port_string((ptcr >> 27) & 0x7)); 95 audmux_port_string((ptcr >> 27) & 0x7));
96 else 96 else
97 ret += snprintf(buf + ret, PAGE_SIZE - ret, 97 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
98 "TxFS input, "); 98 "TxFS input, ");
99 99
100 if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR) 100 if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR)
101 ret += snprintf(buf + ret, PAGE_SIZE - ret, 101 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
102 "TxClk output from %s", 102 "TxClk output from %s",
103 audmux_port_string((ptcr >> 22) & 0x7)); 103 audmux_port_string((ptcr >> 22) & 0x7));
104 else 104 else
105 ret += snprintf(buf + ret, PAGE_SIZE - ret, 105 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
106 "TxClk input"); 106 "TxClk input");
107 107
108 ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); 108 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
109 109
110 if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) { 110 if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) {
111 ret += snprintf(buf + ret, PAGE_SIZE - ret, 111 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
112 "Port is symmetric"); 112 "Port is symmetric");
113 } else { 113 } else {
114 if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR) 114 if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR)
115 ret += snprintf(buf + ret, PAGE_SIZE - ret, 115 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
116 "RxFS output from %s, ", 116 "RxFS output from %s, ",
117 audmux_port_string((ptcr >> 17) & 0x7)); 117 audmux_port_string((ptcr >> 17) & 0x7));
118 else 118 else
119 ret += snprintf(buf + ret, PAGE_SIZE - ret, 119 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
120 "RxFS input, "); 120 "RxFS input, ");
121 121
122 if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR) 122 if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR)
123 ret += snprintf(buf + ret, PAGE_SIZE - ret, 123 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
124 "RxClk output from %s", 124 "RxClk output from %s",
125 audmux_port_string((ptcr >> 12) & 0x7)); 125 audmux_port_string((ptcr >> 12) & 0x7));
126 else 126 else
127 ret += snprintf(buf + ret, PAGE_SIZE - ret, 127 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
128 "RxClk input"); 128 "RxClk input");
129 } 129 }
130 130
131 ret += snprintf(buf + ret, PAGE_SIZE - ret, 131 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
132 "\nData received from %s\n", 132 "\nData received from %s\n",
133 audmux_port_string((pdcr >> 13) & 0x7)); 133 audmux_port_string((pdcr >> 13) & 0x7));
134 134
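Editor's note: the audmux_read_file() conversion matters because snprintf() returns the length that would have been written, so after truncation buf + ret can point past the buffer and the next call scribbles out of bounds; the kernel's scnprintf() returns the bytes actually stored. A userspace re-implementation demonstrating the safe concatenation (scnprintf() here is a local stand-in for the kernel helper):

#include <stdarg.h>
#include <stdio.h>

/* like the kernel's scnprintf(): returns bytes stored, never more than
 * size - 1, instead of snprintf()'s "would have written" count */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (i >= (int)size)
                i = size ? (int)size - 1 : 0;
        return i < 0 ? 0 : i;
}

int main(void)
{
        char buf[16];
        int ret = 0;

        /* with snprintf(), ret could exceed sizeof(buf) here and the
         * second call would write past the end of buf */
        ret += scnprintf(buf + ret, sizeof(buf) - ret, "PDCR: %08x\n", 0xdeadbeefu);
        ret += scnprintf(buf + ret, sizeof(buf) - ret, "PTCR: %08x\n", 0x1234u);

        /* the second line was truncated away safely; ret stays in bounds */
        printf("%d bytes: %s", ret, buf);
        return 0;
}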
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 99a62ba409df..bd9fd2035c55 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -91,7 +91,7 @@ config SND_SST_ATOM_HIFI2_PLATFORM_PCI
91config SND_SST_ATOM_HIFI2_PLATFORM_ACPI 91config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
92 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" 92 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
93 default ACPI 93 default ACPI
94 depends on X86 && ACPI 94 depends on X86 && ACPI && PCI
95 select SND_SST_IPC_ACPI 95 select SND_SST_IPC_ACPI
96 select SND_SST_ATOM_HIFI2_PLATFORM 96 select SND_SST_ATOM_HIFI2_PLATFORM
97 select SND_SOC_ACPI_INTEL_MATCH 97 select SND_SOC_ACPI_INTEL_MATCH
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index afc559866095..91a2436ce952 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
399 struct snd_pcm_hw_params *params, 399 struct snd_pcm_hw_params *params,
400 struct snd_soc_dai *dai) 400 struct snd_soc_dai *dai)
401{ 401{
402 snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); 402 int ret;
403
404 ret =
405 snd_pcm_lib_malloc_pages(substream,
406 params_buffer_bytes(params));
407 if (ret)
408 return ret;
403 memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); 409 memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
404 return 0; 410 return 0;
405} 411}
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 68e6543e6cb0..99f2a0156ae8 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -192,7 +192,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
192 .stream_name = "Loopback", 192 .stream_name = "Loopback",
193 .cpu_dai_name = "Loopback Pin", 193 .cpu_dai_name = "Loopback Pin",
194 .platform_name = "haswell-pcm-audio", 194 .platform_name = "haswell-pcm-audio",
195 .dynamic = 0, 195 .dynamic = 1,
196 .codec_name = "snd-soc-dummy", 196 .codec_name = "snd-soc-dummy",
197 .codec_dai_name = "snd-soc-dummy-dai", 197 .codec_dai_name = "snd-soc-dummy-dai",
198 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 198 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index c74c4f17316f..8f83b182c4f9 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -55,39 +55,6 @@ enum {
55 GLK_DPCM_AUDIO_HDMI3_PB, 55 GLK_DPCM_AUDIO_HDMI3_PB,
56}; 56};
57 57
58static int platform_clock_control(struct snd_soc_dapm_widget *w,
59 struct snd_kcontrol *k, int event)
60{
61 struct snd_soc_dapm_context *dapm = w->dapm;
62 struct snd_soc_card *card = dapm->card;
63 struct snd_soc_dai *codec_dai;
64 int ret = 0;
65
66 codec_dai = snd_soc_card_get_codec_dai(card, GLK_REALTEK_CODEC_DAI);
67 if (!codec_dai) {
68 dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
69 return -EIO;
70 }
71
72 if (SND_SOC_DAPM_EVENT_OFF(event)) {
73 ret = snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0);
74 if (ret)
75 dev_err(card->dev, "failed to stop sysclk: %d\n", ret);
76 } else if (SND_SOC_DAPM_EVENT_ON(event)) {
77 ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK,
78 GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ);
79 if (ret < 0) {
80 dev_err(card->dev, "can't set codec pll: %d\n", ret);
81 return ret;
82 }
83 }
84
85 if (ret)
86 dev_err(card->dev, "failed to start internal clk: %d\n", ret);
87
88 return ret;
89}
90
91static const struct snd_kcontrol_new geminilake_controls[] = { 58static const struct snd_kcontrol_new geminilake_controls[] = {
92 SOC_DAPM_PIN_SWITCH("Headphone Jack"), 59 SOC_DAPM_PIN_SWITCH("Headphone Jack"),
93 SOC_DAPM_PIN_SWITCH("Headset Mic"), 60 SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -102,14 +69,10 @@ static const struct snd_soc_dapm_widget geminilake_widgets[] = {
102 SND_SOC_DAPM_SPK("HDMI1", NULL), 69 SND_SOC_DAPM_SPK("HDMI1", NULL),
103 SND_SOC_DAPM_SPK("HDMI2", NULL), 70 SND_SOC_DAPM_SPK("HDMI2", NULL),
104 SND_SOC_DAPM_SPK("HDMI3", NULL), 71 SND_SOC_DAPM_SPK("HDMI3", NULL),
105 SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
106 platform_clock_control, SND_SOC_DAPM_PRE_PMU |
107 SND_SOC_DAPM_POST_PMD),
108}; 72};
109 73
110static const struct snd_soc_dapm_route geminilake_map[] = { 74static const struct snd_soc_dapm_route geminilake_map[] = {
111 /* HP jack connectors - unknown if we have jack detection */ 75 /* HP jack connectors - unknown if we have jack detection */
112 { "Headphone Jack", NULL, "Platform Clock" },
113 { "Headphone Jack", NULL, "HPOL" }, 76 { "Headphone Jack", NULL, "HPOL" },
114 { "Headphone Jack", NULL, "HPOR" }, 77 { "Headphone Jack", NULL, "HPOR" },
115 78
@@ -117,7 +80,6 @@ static const struct snd_soc_dapm_route geminilake_map[] = {
117 { "Spk", NULL, "Speaker" }, 80 { "Spk", NULL, "Speaker" },
118 81
119 /* other jacks */ 82 /* other jacks */
120 { "Headset Mic", NULL, "Platform Clock" },
121 { "IN1P", NULL, "Headset Mic" }, 83 { "IN1P", NULL, "Headset Mic" },
122 84
123 /* digital mics */ 85 /* digital mics */
@@ -177,6 +139,13 @@ static int geminilake_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
177 struct snd_soc_jack *jack; 139 struct snd_soc_jack *jack;
178 int ret; 140 int ret;
179 141
142 ret = snd_soc_dai_set_pll(codec_dai, 0, RT5682_PLL1_S_MCLK,
143 GLK_PLAT_CLK_FREQ, RT5682_PLL_FREQ);
144 if (ret < 0) {
145 dev_err(rtd->dev, "can't set codec pll: %d\n", ret);
146 return ret;
147 }
148
180 /* Configure sysclk for codec */ 149 /* Configure sysclk for codec */
181 ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1, 150 ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1,
182 RT5682_PLL_FREQ, SND_SOC_CLOCK_IN); 151 RT5682_PLL_FREQ, SND_SOC_CLOCK_IN);
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index eab1f439dd3f..a4022983a7ce 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -146,7 +146,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
146 .stream_name = "Loopback", 146 .stream_name = "Loopback",
147 .cpu_dai_name = "Loopback Pin", 147 .cpu_dai_name = "Loopback Pin",
148 .platform_name = "haswell-pcm-audio", 148 .platform_name = "haswell-pcm-audio",
149 .dynamic = 0, 149 .dynamic = 1,
150 .codec_name = "snd-soc-dummy", 150 .codec_name = "snd-soc-dummy",
151 .codec_dai_name = "snd-soc-dummy-dai", 151 .codec_dai_name = "snd-soc-dummy-dai",
152 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 152 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 60c94836bf5b..4ed5b7e17d44 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -336,9 +336,6 @@ static int skl_suspend(struct device *dev)
336 skl->skl_sst->fw_loaded = false; 336 skl->skl_sst->fw_loaded = false;
337 } 337 }
338 338
339 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
340 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
341
342 return 0; 339 return 0;
343} 340}
344 341
@@ -350,10 +347,6 @@ static int skl_resume(struct device *dev)
350 struct hdac_ext_link *hlink = NULL; 347 struct hdac_ext_link *hlink = NULL;
351 int ret; 348 int ret;
352 349
353 /* Turned OFF in HDMI codec driver after codec reconfiguration */
354 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
355 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
356
357 /* 350 /*
358 * resume only when we are not in suspend-active state; otherwise we 351 * resume only when we are not in suspend-active state; otherwise we
359 * need to restore the device 352 * need to restore the device
@@ -446,8 +439,10 @@ static int skl_free(struct hdac_bus *bus)
446 snd_hdac_ext_bus_exit(bus); 439 snd_hdac_ext_bus_exit(bus);
447 440
448 cancel_work_sync(&skl->probe_work); 441 cancel_work_sync(&skl->probe_work);
449 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 442 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
443 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
450 snd_hdac_i915_exit(bus); 444 snd_hdac_i915_exit(bus);
445 }
451 446
452 return 0; 447 return 0;
453} 448}
@@ -814,7 +809,7 @@ static void skl_probe_work(struct work_struct *work)
814 err = skl_platform_register(bus->dev); 809 err = skl_platform_register(bus->dev);
815 if (err < 0) { 810 if (err < 0) {
816 dev_err(bus->dev, "platform register failed: %d\n", err); 811 dev_err(bus->dev, "platform register failed: %d\n", err);
817 return; 812 goto out_err;
818 } 813 }
819 814
820 err = skl_machine_device_register(skl); 815 err = skl_machine_device_register(skl);
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 5b986b74dd36..548eb4fa2da6 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -570,10 +570,10 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
570 prtd->audio_client = q6asm_audio_client_alloc(dev, 570 prtd->audio_client = q6asm_audio_client_alloc(dev,
571 (q6asm_cb)compress_event_handler, 571 (q6asm_cb)compress_event_handler,
572 prtd, stream_id, LEGACY_PCM_MODE); 572 prtd, stream_id, LEGACY_PCM_MODE);
573 if (!prtd->audio_client) { 573 if (IS_ERR(prtd->audio_client)) {
574 dev_err(dev, "Could not allocate memory\n"); 574 dev_err(dev, "Could not allocate memory\n");
575 kfree(prtd); 575 ret = PTR_ERR(prtd->audio_client);
576 return -ENOMEM; 576 goto free_prtd;
577 } 577 }
578 578
579 size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE * 579 size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE *
@@ -582,7 +582,7 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
582 &prtd->dma_buffer); 582 &prtd->dma_buffer);
583 if (ret) { 583 if (ret) {
584 dev_err(dev, "Cannot allocate buffer(s)\n"); 584 dev_err(dev, "Cannot allocate buffer(s)\n");
585 return ret; 585 goto free_client;
586 } 586 }
587 587
588 if (pdata->sid < 0) 588 if (pdata->sid < 0)
@@ -595,6 +595,13 @@ static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
595 runtime->private_data = prtd; 595 runtime->private_data = prtd;
596 596
597 return 0; 597 return 0;
598
599free_client:
600 q6asm_audio_client_free(prtd->audio_client);
601free_prtd:
602 kfree(prtd);
603
604 return ret;
598} 605}
599 606
600static int q6asm_dai_compr_free(struct snd_compr_stream *stream) 607static int q6asm_dai_compr_free(struct snd_compr_stream *stream)
@@ -874,7 +881,7 @@ static int of_q6asm_parse_dai_data(struct device *dev,
874 881
875 for_each_child_of_node(dev->of_node, node) { 882 for_each_child_of_node(dev->of_node, node) {
876 ret = of_property_read_u32(node, "reg", &id); 883 ret = of_property_read_u32(node, "reg", &id);
877 if (ret || id > MAX_SESSIONS || id < 0) { 884 if (ret || id >= MAX_SESSIONS || id < 0) {
878 dev_err(dev, "valid dai id not found:%d\n", ret); 885 dev_err(dev, "valid dai id not found:%d\n", ret);
879 continue; 886 continue;
880 } 887 }
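Editor's note: besides fixing the off-by-one session bound, the q6asm hunks correct the error check on q6asm_audio_client_alloc() — it returns an ERR_PTR, so a NULL test can never fire — and add a proper unwind chain so each failure releases exactly what was acquired before it. A minimal sketch of the label-per-step unwinding (plain malloc/free stand in for the driver's allocators):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* release in reverse order of acquisition, one label per teardown step */
static int open_stream(int fail_client, int fail_buffer)
{
        int ret;
        void *prtd, *client, *buffer;

        prtd = malloc(32);
        if (!prtd)
                return -ENOMEM;

        client = fail_client ? NULL : malloc(32);
        if (!client) {
                ret = -ENOMEM;
                goto free_prtd;
        }

        buffer = fail_buffer ? NULL : malloc(32);
        if (!buffer) {
                ret = -ENOMEM;
                goto free_client;
        }

        /* a real driver keeps these until close; the demo releases them */
        free(buffer);
        free(client);
        free(prtd);
        return 0;

free_client:
        free(client);
free_prtd:
        free(prtd);
        return ret;
}

int main(void)
{
        printf("%d %d %d\n", open_stream(0, 0),
               open_stream(1, 0), open_stream(0, 1));
        return 0;
}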
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 1db8ef668223..6f66a58e23ca 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -158,17 +158,24 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
158 return ret; 158 return ret;
159} 159}
160 160
161static void sdm845_jack_free(struct snd_jack *jack)
162{
163 struct snd_soc_component *component = jack->private_data;
164
165 snd_soc_component_set_jack(component, NULL, NULL);
166}
167
161static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd) 168static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
162{ 169{
163 struct snd_soc_component *component; 170 struct snd_soc_component *component;
164 struct snd_soc_dai_link *dai_link = rtd->dai_link;
165 struct snd_soc_card *card = rtd->card; 171 struct snd_soc_card *card = rtd->card;
172 struct snd_soc_dai *codec_dai = rtd->codec_dai;
173 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
166 struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card); 174 struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card);
167 int i, rval; 175 struct snd_jack *jack;
176 int rval;
168 177
169 if (!pdata->jack_setup) { 178 if (!pdata->jack_setup) {
170 struct snd_jack *jack;
171
172 rval = snd_soc_card_jack_new(card, "Headset Jack", 179 rval = snd_soc_card_jack_new(card, "Headset Jack",
173 SND_JACK_HEADSET | 180 SND_JACK_HEADSET |
174 SND_JACK_HEADPHONE | 181 SND_JACK_HEADPHONE |
@@ -190,16 +197,22 @@ static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
190 pdata->jack_setup = true; 197 pdata->jack_setup = true;
191 } 198 }
192 199
193 for (i = 0 ; i < dai_link->num_codecs; i++) { 200 switch (cpu_dai->id) {
194 struct snd_soc_dai *dai = rtd->codec_dais[i]; 201 case PRIMARY_MI2S_RX:
202 jack = pdata->jack.jack;
203 component = codec_dai->component;
195 204
196 component = dai->component; 205 jack->private_data = component;
197 rval = snd_soc_component_set_jack( 206 jack->private_free = sdm845_jack_free;
198 component, &pdata->jack, NULL); 207 rval = snd_soc_component_set_jack(component,
208 &pdata->jack, NULL);
199 if (rval != 0 && rval != -ENOTSUPP) { 209 if (rval != 0 && rval != -ENOTSUPP) {
200 dev_warn(card->dev, "Failed to set jack: %d\n", rval); 210 dev_warn(card->dev, "Failed to set jack: %d\n", rval);
201 return rval; 211 return rval;
202 } 212 }
213 break;
214 default:
215 break;
203 } 216 }
204 217
205 return 0; 218 return 0;
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index 922fb6aa3ed1..5aee11c94f2a 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -202,7 +202,7 @@ static int camelot_prepare(struct snd_pcm_substream *substream)
202 struct snd_soc_pcm_runtime *rtd = substream->private_data; 202 struct snd_soc_pcm_runtime *rtd = substream->private_data;
203 struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id]; 203 struct camelot_pcm *cam = &cam_pcm_data[rtd->cpu_dai->id];
204 204
205 pr_debug("PCM data: addr 0x%08ulx len %d\n", 205 pr_debug("PCM data: addr 0x%08lx len %d\n",
206 (u32)runtime->dma_addr, runtime->dma_bytes); 206 (u32)runtime->dma_addr, runtime->dma_bytes);
207 207
208 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 208 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0462b3ec977a..aae450ba4f08 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -742,7 +742,7 @@ static struct snd_soc_component *soc_find_component(
742 if (of_node) { 742 if (of_node) {
743 if (component->dev->of_node == of_node) 743 if (component->dev->of_node == of_node)
744 return component; 744 return component;
745 } else if (strcmp(component->name, name) == 0) { 745 } else if (name && strcmp(component->name, name) == 0) {
746 return component; 746 return component;
747 } 747 }
748 } 748 }
@@ -1034,17 +1034,18 @@ static int snd_soc_init_platform(struct snd_soc_card *card,
1034 * this function should be removed in the future 1034 * this function should be removed in the future
1035 */ 1035 */
1036 /* convert Legacy platform link */ 1036 /* convert Legacy platform link */
1037 if (!platform) { 1037 if (!platform || dai_link->legacy_platform) {
1038 platform = devm_kzalloc(card->dev, 1038 platform = devm_kzalloc(card->dev,
1039 sizeof(struct snd_soc_dai_link_component), 1039 sizeof(struct snd_soc_dai_link_component),
1040 GFP_KERNEL); 1040 GFP_KERNEL);
1041 if (!platform) 1041 if (!platform)
1042 return -ENOMEM; 1042 return -ENOMEM;
1043 1043
1044 dai_link->platform = platform; 1044 dai_link->platform = platform;
1045 platform->name = dai_link->platform_name; 1045 dai_link->legacy_platform = 1;
1046 platform->of_node = dai_link->platform_of_node; 1046 platform->name = dai_link->platform_name;
1047 platform->dai_name = NULL; 1047 platform->of_node = dai_link->platform_of_node;
1048 platform->dai_name = NULL;
1048 } 1049 }
1049 1050
1050 /* if there's no platform we match on the empty platform */ 1051 /* if there's no platform we match on the empty platform */
@@ -1129,6 +1130,15 @@ static int soc_init_dai_link(struct snd_soc_card *card,
1129 link->name); 1130 link->name);
1130 return -EINVAL; 1131 return -EINVAL;
1131 } 1132 }
1133
1134 /*
1135 * Defer card registartion if platform dai component is not added to
1136 * component list.
1137 */
1138 if ((link->platform->of_node || link->platform->name) &&
1139 !soc_find_component(link->platform->of_node, link->platform->name))
1140 return -EPROBE_DEFER;
1141
1132 /* 1142 /*
1133 * CPU device may be specified by either name or OF node, but 1143 * CPU device may be specified by either name or OF node, but
1134 * can be left unspecified, and will be matched based on DAI 1144 * can be left unspecified, and will be matched based on DAI
@@ -1140,6 +1150,15 @@ static int soc_init_dai_link(struct snd_soc_card *card,
1140 link->name); 1150 link->name);
1141 return -EINVAL; 1151 return -EINVAL;
1142 } 1152 }
1153
1154 /*
1155 * Defer card registration if the CPU DAI component has not been
1156 * added to the component list.
1157 */
1158 if ((link->cpu_of_node || link->cpu_name) &&
1159 !soc_find_component(link->cpu_of_node, link->cpu_name))
1160 return -EPROBE_DEFER;
1161
1143 /* 1162 /*
1144 * At least one of CPU DAI name or CPU device name/node must be 1163 * At least one of CPU DAI name or CPU device name/node must be
1145 * specified 1164 * specified
@@ -2739,15 +2758,18 @@ int snd_soc_register_card(struct snd_soc_card *card)
2739 if (!card->name || !card->dev) 2758 if (!card->name || !card->dev)
2740 return -EINVAL; 2759 return -EINVAL;
2741 2760
2761 mutex_lock(&client_mutex);
2742 for_each_card_prelinks(card, i, link) { 2762 for_each_card_prelinks(card, i, link) {
2743 2763
2744 ret = soc_init_dai_link(card, link); 2764 ret = soc_init_dai_link(card, link);
2745 if (ret) { 2765 if (ret) {
2746 dev_err(card->dev, "ASoC: failed to init link %s\n", 2766 dev_err(card->dev, "ASoC: failed to init link %s\n",
2747 link->name); 2767 link->name);
2768 mutex_unlock(&client_mutex);
2748 return ret; 2769 return ret;
2749 } 2770 }
2750 } 2771 }
2772 mutex_unlock(&client_mutex);
2751 2773
2752 dev_set_drvdata(card->dev, card); 2774 dev_set_drvdata(card->dev, card);
2753 2775
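The two -EPROBE_DEFER hunks above follow the standard deferred-probe handshake: if a DAI link names a component that has not registered yet, card init bails out with -EPROBE_DEFER and the driver core retries later, instead of failing the card outright. A minimal sketch of the idiom, with illustrative structure, not the exact ASoC internals:

    static int init_link(struct snd_soc_card *card,
                         struct snd_soc_dai_link *link)
    {
            /* The referenced component may simply not have probed yet. */
            if (!soc_find_component(link->platform->of_node,
                                    link->platform->name))
                    return -EPROBE_DEFER; /* driver core retries probe */
            return 0;
    }

The client_mutex now taken around the prelink loop keeps the component list stable while soc_find_component() walks it.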
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index a5178845065b..2c4c13419539 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2019,19 +2019,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
2019 out = is_connected_output_ep(w, NULL, NULL); 2019 out = is_connected_output_ep(w, NULL, NULL);
2020 } 2020 }
2021 2021
2022 ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", 2022 ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
2023 w->name, w->power ? "On" : "Off", 2023 w->name, w->power ? "On" : "Off",
2024 w->force ? " (forced)" : "", in, out); 2024 w->force ? " (forced)" : "", in, out);
2025 2025
2026 if (w->reg >= 0) 2026 if (w->reg >= 0)
2027 ret += snprintf(buf + ret, PAGE_SIZE - ret, 2027 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2028 " - R%d(0x%x) mask 0x%x", 2028 " - R%d(0x%x) mask 0x%x",
2029 w->reg, w->reg, w->mask << w->shift); 2029 w->reg, w->reg, w->mask << w->shift);
2030 2030
2031 ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n"); 2031 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
2032 2032
2033 if (w->sname) 2033 if (w->sname)
2034 ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n", 2034 ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
2035 w->sname, 2035 w->sname,
2036 w->active ? "active" : "inactive"); 2036 w->active ? "active" : "inactive");
2037 2037
@@ -2044,7 +2044,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
2044 if (!p->connect) 2044 if (!p->connect)
2045 continue; 2045 continue;
2046 2046
2047 ret += snprintf(buf + ret, PAGE_SIZE - ret, 2047 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2048 " %s \"%s\" \"%s\"\n", 2048 " %s \"%s\" \"%s\"\n",
2049 (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out", 2049 (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
2050 p->name ? p->name : "static", 2050 p->name ? p->name : "static",
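The snprintf()-to-scnprintf() conversion matters because the two return different things: snprintf() returns the length the output would have had, so once truncation starts, buf + ret points past the end of the buffer and the next chained call writes out of bounds; scnprintf() returns the bytes actually stored, keeping ret valid as an offset. A self-contained userspace illustration of the failure mode (glibc snprintf has the same return semantics):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[8];
            int ret = snprintf(buf, sizeof(buf), "0123456789");
            /* ret == 10, but only 7 chars + NUL were stored; a second
             * snprintf(buf + ret, sizeof(buf) - ret, ...) would compute
             * a huge unsigned size and write past the buffer.  The
             * kernel's scnprintf() would have returned 7 here. */
            printf("ret=%d stored=%zu\n", ret, strlen(buf));
            return 0;
    }

Note that sizeof(buf) - ret underflows to a very large size_t once ret exceeds the buffer size, which is why the chained dapm debugfs writes were prone to overflow.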
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index eeda6d5565bc..a10fcb5963c6 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -108,7 +108,7 @@ struct davinci_mcasp {
108 /* Used for constraint setting on the second stream */ 108 /* Used for constraint setting on the second stream */
109 u32 channels; 109 u32 channels;
110 110
111#ifdef CONFIG_PM_SLEEP 111#ifdef CONFIG_PM
112 struct davinci_mcasp_context context; 112 struct davinci_mcasp_context context;
113#endif 113#endif
114 114
@@ -1486,74 +1486,6 @@ static int davinci_mcasp_dai_probe(struct snd_soc_dai *dai)
1486 return 0; 1486 return 0;
1487} 1487}
1488 1488
1489#ifdef CONFIG_PM_SLEEP
1490static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
1491{
1492 struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
1493 struct davinci_mcasp_context *context = &mcasp->context;
1494 u32 reg;
1495 int i;
1496
1497 context->pm_state = pm_runtime_active(mcasp->dev);
1498 if (!context->pm_state)
1499 pm_runtime_get_sync(mcasp->dev);
1500
1501 for (i = 0; i < ARRAY_SIZE(context_regs); i++)
1502 context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
1503
1504 if (mcasp->txnumevt) {
1505 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
1506 context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
1507 }
1508 if (mcasp->rxnumevt) {
1509 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
1510 context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
1511 }
1512
1513 for (i = 0; i < mcasp->num_serializer; i++)
1514 context->xrsr_regs[i] = mcasp_get_reg(mcasp,
1515 DAVINCI_MCASP_XRSRCTL_REG(i));
1516
1517 pm_runtime_put_sync(mcasp->dev);
1518
1519 return 0;
1520}
1521
1522static int davinci_mcasp_resume(struct snd_soc_dai *dai)
1523{
1524 struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(dai);
1525 struct davinci_mcasp_context *context = &mcasp->context;
1526 u32 reg;
1527 int i;
1528
1529 pm_runtime_get_sync(mcasp->dev);
1530
1531 for (i = 0; i < ARRAY_SIZE(context_regs); i++)
1532 mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
1533
1534 if (mcasp->txnumevt) {
1535 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
1536 mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
1537 }
1538 if (mcasp->rxnumevt) {
1539 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
1540 mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
1541 }
1542
1543 for (i = 0; i < mcasp->num_serializer; i++)
1544 mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
1545 context->xrsr_regs[i]);
1546
1547 if (!context->pm_state)
1548 pm_runtime_put_sync(mcasp->dev);
1549
1550 return 0;
1551}
1552#else
1553#define davinci_mcasp_suspend NULL
1554#define davinci_mcasp_resume NULL
1555#endif
1556
1557#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000 1489#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_192000
1558 1490
1559#define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \ 1491#define DAVINCI_MCASP_PCM_FMTS (SNDRV_PCM_FMTBIT_S8 | \
@@ -1571,8 +1503,6 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
1571 { 1503 {
1572 .name = "davinci-mcasp.0", 1504 .name = "davinci-mcasp.0",
1573 .probe = davinci_mcasp_dai_probe, 1505 .probe = davinci_mcasp_dai_probe,
1574 .suspend = davinci_mcasp_suspend,
1575 .resume = davinci_mcasp_resume,
1576 .playback = { 1506 .playback = {
1577 .channels_min = 1, 1507 .channels_min = 1,
1578 .channels_max = 32 * 16, 1508 .channels_max = 32 * 16,
@@ -1976,7 +1906,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1976 } 1906 }
1977 1907
1978 mcasp->num_serializer = pdata->num_serializer; 1908 mcasp->num_serializer = pdata->num_serializer;
1979#ifdef CONFIG_PM_SLEEP 1909#ifdef CONFIG_PM
1980 mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev, 1910 mcasp->context.xrsr_regs = devm_kcalloc(&pdev->dev,
1981 mcasp->num_serializer, sizeof(u32), 1911 mcasp->num_serializer, sizeof(u32),
1982 GFP_KERNEL); 1912 GFP_KERNEL);
@@ -2196,11 +2126,73 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
2196 return 0; 2126 return 0;
2197} 2127}
2198 2128
2129#ifdef CONFIG_PM
2130static int davinci_mcasp_runtime_suspend(struct device *dev)
2131{
2132 struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
2133 struct davinci_mcasp_context *context = &mcasp->context;
2134 u32 reg;
2135 int i;
2136
2137 for (i = 0; i < ARRAY_SIZE(context_regs); i++)
2138 context->config_regs[i] = mcasp_get_reg(mcasp, context_regs[i]);
2139
2140 if (mcasp->txnumevt) {
2141 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
2142 context->afifo_regs[0] = mcasp_get_reg(mcasp, reg);
2143 }
2144 if (mcasp->rxnumevt) {
2145 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
2146 context->afifo_regs[1] = mcasp_get_reg(mcasp, reg);
2147 }
2148
2149 for (i = 0; i < mcasp->num_serializer; i++)
2150 context->xrsr_regs[i] = mcasp_get_reg(mcasp,
2151 DAVINCI_MCASP_XRSRCTL_REG(i));
2152
2153 return 0;
2154}
2155
2156static int davinci_mcasp_runtime_resume(struct device *dev)
2157{
2158 struct davinci_mcasp *mcasp = dev_get_drvdata(dev);
2159 struct davinci_mcasp_context *context = &mcasp->context;
2160 u32 reg;
2161 int i;
2162
2163 for (i = 0; i < ARRAY_SIZE(context_regs); i++)
2164 mcasp_set_reg(mcasp, context_regs[i], context->config_regs[i]);
2165
2166 if (mcasp->txnumevt) {
2167 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
2168 mcasp_set_reg(mcasp, reg, context->afifo_regs[0]);
2169 }
2170 if (mcasp->rxnumevt) {
2171 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
2172 mcasp_set_reg(mcasp, reg, context->afifo_regs[1]);
2173 }
2174
2175 for (i = 0; i < mcasp->num_serializer; i++)
2176 mcasp_set_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
2177 context->xrsr_regs[i]);
2178
2179 return 0;
2180}
2181
2182#endif
2183
2184static const struct dev_pm_ops davinci_mcasp_pm_ops = {
2185 SET_RUNTIME_PM_OPS(davinci_mcasp_runtime_suspend,
2186 davinci_mcasp_runtime_resume,
2187 NULL)
2188};
2189
2199static struct platform_driver davinci_mcasp_driver = { 2190static struct platform_driver davinci_mcasp_driver = {
2200 .probe = davinci_mcasp_probe, 2191 .probe = davinci_mcasp_probe,
2201 .remove = davinci_mcasp_remove, 2192 .remove = davinci_mcasp_remove,
2202 .driver = { 2193 .driver = {
2203 .name = "davinci-mcasp", 2194 .name = "davinci-mcasp",
2195 .pm = &davinci_mcasp_pm_ops,
2204 .of_match_table = mcasp_dt_ids, 2196 .of_match_table = mcasp_dt_ids,
2205 }, 2197 },
2206}; 2198};
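Moving the register save/restore from the DAI suspend/resume hooks into runtime-PM callbacks ties the context handling to the device actually powering down (under CONFIG_PM) rather than only to system sleep (CONFIG_PM_SLEEP). Roughly how the new callbacks get exercised, sketched with the names from the hunk above; the exact trigger points depend on the runtime-PM state of the device:

    /* Illustrative call flow, not new driver code: */
    pm_runtime_get_sync(mcasp->dev);  /* powers up; may invoke
                                       * davinci_mcasp_runtime_resume() */
    /* ... stream running, McASP registers live ... */
    pm_runtime_put_sync(mcasp->dev);  /* last user gone; may invoke
                                       * davinci_mcasp_runtime_suspend() */

Since context is captured whenever the device powers down, the explicit pm_state bookkeeping in the old DAI hooks could be dropped.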
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig
index 25e287feb58c..723a583a8d57 100644
--- a/sound/soc/xilinx/Kconfig
+++ b/sound/soc/xilinx/Kconfig
@@ -1,5 +1,5 @@
1config SND_SOC_XILINX_I2S 1config SND_SOC_XILINX_I2S
2 tristate "Audio support for the the Xilinx I2S" 2 tristate "Audio support for the Xilinx I2S"
3 help 3 help
4 Select this option to enable Xilinx I2S Audio. This enables 4 Select this option to enable Xilinx I2S Audio. This enables
5 I2S playback and capture using xilinx soft IP. In transmitter 5 I2S playback and capture using xilinx soft IP. In transmitter
diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c
index d4ae9eff41ce..8b353166ad44 100644
--- a/sound/soc/xilinx/xlnx_i2s.c
+++ b/sound/soc/xilinx/xlnx_i2s.c
@@ -1,12 +1,11 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2//
3 * Xilinx ASoC I2S audio support 3// Xilinx ASoC I2S audio support
4 * 4//
5 * Copyright (C) 2018 Xilinx, Inc. 5// Copyright (C) 2018 Xilinx, Inc.
6 * 6//
7 * Author: Praveen Vuppala <praveenv@xilinx.com> 7// Author: Praveen Vuppala <praveenv@xilinx.com>
8 * Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com> 8// Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
9 */
10 9
11#include <linux/io.h> 10#include <linux/io.h>
12#include <linux/module.h> 11#include <linux/module.h>
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index ebbadb3a7094..7e65fe853ee3 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1492,6 +1492,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1492 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1492 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1493 break; 1493 break;
1494 1494
1495 case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
1495 case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */ 1496 case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
1496 case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ 1497 case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
1497 case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ 1498 case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
@@ -1566,6 +1567,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1566 case 0x20b1: /* XMOS based devices */ 1567 case 0x20b1: /* XMOS based devices */
1567 case 0x152a: /* Thesycon devices */ 1568 case 0x152a: /* Thesycon devices */
1568 case 0x25ce: /* Mytek devices */ 1569 case 0x25ce: /* Mytek devices */
1570 case 0x2ab6: /* T+A devices */
1569 if (fp->dsd_raw) 1571 if (fp->dsd_raw)
1570 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1572 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1571 break; 1573 break;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d079f36d342d..ac221f137ed2 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1681,13 +1681,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
1681 .force_header = false, 1681 .force_header = false,
1682 }; 1682 };
1683 struct perf_evsel *ev2; 1683 struct perf_evsel *ev2;
1684 static bool init;
1685 u64 val; 1684 u64 val;
1686 1685
1687 if (!init) {
1688 perf_stat__init_shadow_stats();
1689 init = true;
1690 }
1691 if (!evsel->stats) 1686 if (!evsel->stats)
1692 perf_evlist__alloc_stats(script->session->evlist, false); 1687 perf_evlist__alloc_stats(script->session->evlist, false);
1693 if (evsel_script(evsel->leader)->gnum++ == 0) 1688 if (evsel_script(evsel->leader)->gnum++ == 0)
@@ -1794,7 +1789,7 @@ static void process_event(struct perf_script *script,
1794 return; 1789 return;
1795 } 1790 }
1796 1791
1797 if (PRINT_FIELD(TRACE)) { 1792 if (PRINT_FIELD(TRACE) && sample->raw_data) {
1798 event_format__fprintf(evsel->tp_format, sample->cpu, 1793 event_format__fprintf(evsel->tp_format, sample->cpu,
1799 sample->raw_data, sample->raw_size, fp); 1794 sample->raw_data, sample->raw_size, fp);
1800 } 1795 }
@@ -2359,6 +2354,8 @@ static int __cmd_script(struct perf_script *script)
2359 2354
2360 signal(SIGINT, sig_handler); 2355 signal(SIGINT, sig_handler);
2361 2356
2357 perf_stat__init_shadow_stats();
2358
2362 /* override event processing functions */ 2359 /* override event processing functions */
2363 if (script->show_task_events) { 2360 if (script->show_task_events) {
2364 script->tool.comm = process_comm_event; 2361 script->tool.comm = process_comm_event;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 1d00e5ec7906..82e16bf84466 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -224,20 +224,24 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
224 return ret; 224 return ret;
225} 225}
226 226
227static int disasm__cmp(struct annotation_line *a, struct annotation_line *b) 227static double disasm__cmp(struct annotation_line *a, struct annotation_line *b,
228 int percent_type)
228{ 229{
229 int i; 230 int i;
230 231
231 for (i = 0; i < a->data_nr; i++) { 232 for (i = 0; i < a->data_nr; i++) {
232 if (a->data[i].percent == b->data[i].percent) 233 if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type])
233 continue; 234 continue;
234 return a->data[i].percent < b->data[i].percent; 235 return a->data[i].percent[percent_type] -
236 b->data[i].percent[percent_type];
235 } 237 }
236 return 0; 238 return 0;
237} 239}
238 240
239static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al) 241static void disasm_rb_tree__insert(struct annotate_browser *browser,
242 struct annotation_line *al)
240{ 243{
244 struct rb_root *root = &browser->entries;
241 struct rb_node **p = &root->rb_node; 245 struct rb_node **p = &root->rb_node;
242 struct rb_node *parent = NULL; 246 struct rb_node *parent = NULL;
243 struct annotation_line *l; 247 struct annotation_line *l;
@@ -246,7 +250,7 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line
246 parent = *p; 250 parent = *p;
247 l = rb_entry(parent, struct annotation_line, rb_node); 251 l = rb_entry(parent, struct annotation_line, rb_node);
248 252
249 if (disasm__cmp(al, l)) 253 if (disasm__cmp(al, l, browser->opts->percent_type) < 0)
250 p = &(*p)->rb_left; 254 p = &(*p)->rb_left;
251 else 255 else
252 p = &(*p)->rb_right; 256 p = &(*p)->rb_right;
@@ -329,7 +333,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
329 RB_CLEAR_NODE(&pos->al.rb_node); 333 RB_CLEAR_NODE(&pos->al.rb_node);
330 continue; 334 continue;
331 } 335 }
332 disasm_rb_tree__insert(&browser->entries, &pos->al); 336 disasm_rb_tree__insert(browser, &pos->al);
333 } 337 }
334 pthread_mutex_unlock(&notes->lock); 338 pthread_mutex_unlock(&notes->lock);
335 339
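Two things changed in the comparator: it now indexes percent[percent_type], so sorting follows whichever percentage view the browser displays, and it returns a double difference instead of an int boolean. The double return is the subtle part; a sketch of why it matters:

    /* If the difference were truncated to int, 0.3 - 0.1 would become 0
     * and the two lines would wrongly compare as equal: */
    double d = disasm__cmp(a, b, browser->opts->percent_type);
    if (d < 0)
            /* a has the smaller percentage: insert to the left */;

so rb-tree insertion now keys on the sign of the exact floating-point difference.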
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 1ccbd3342069..383674f448fc 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
134 if (!cpu_list) 134 if (!cpu_list)
135 return cpu_map__read_all_cpu_map(); 135 return cpu_map__read_all_cpu_map();
136 136
137 if (!isdigit(*cpu_list)) 137 /*
138 * must handle the case of an empty cpumap to cover the
139 * TOPOLOGY header for NUMA nodes with no CPU
140 * (e.g., because of CPU hotplug)
141 */
142 if (!isdigit(*cpu_list) && *cpu_list != '\0')
138 goto out; 143 goto out;
139 144
140 while (isdigit(*cpu_list)) { 145 while (isdigit(*cpu_list)) {
@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
181 186
182 if (nr_cpus > 0) 187 if (nr_cpus > 0)
183 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); 188 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
184 else 189 else if (*cpu_list != '\0')
185 cpus = cpu_map__default_new(); 190 cpus = cpu_map__default_new();
191 else
192 cpus = cpu_map__dummy_new();
186invalid: 193invalid:
187 free(tmp_cpus); 194 free(tmp_cpus);
188out: 195out:
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 897589507d97..ea523d3b248f 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -391,8 +391,10 @@ void ordered_events__free(struct ordered_events *oe)
391 * Current buffer might not have all the events allocated 391 * Current buffer might not have all the events allocated
392 * yet, we need to free only allocated ones ... 392 * yet, we need to free only allocated ones ...
393 */ 393 */
394 list_del(&oe->buffer->list); 394 if (oe->buffer) {
395 ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); 395 list_del(&oe->buffer->list);
396 ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
397 }
396 398
397 /* ... and continue with the rest */ 399 /* ... and continue with the rest */
398 list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) { 400 list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 63f758c655d5..64d1f36dee99 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -17,6 +17,8 @@ if cc == "clang":
17 vars[var] = sub("-mcet", "", vars[var]) 17 vars[var] = sub("-mcet", "", vars[var])
18 if not clang_has_option("-fcf-protection"): 18 if not clang_has_option("-fcf-protection"):
19 vars[var] = sub("-fcf-protection", "", vars[var]) 19 vars[var] = sub("-fcf-protection", "", vars[var])
20 if not clang_has_option("-fstack-clash-protection"):
21 vars[var] = sub("-fstack-clash-protection", "", vars[var])
20 22
21from distutils.core import setup, Extension 23from distutils.core import setup, Extension
22 24
diff --git a/tools/testing/nvdimm/dimm_devs.c b/tools/testing/nvdimm/dimm_devs.c
index e75238404555..2d4baf57822f 100644
--- a/tools/testing/nvdimm/dimm_devs.c
+++ b/tools/testing/nvdimm/dimm_devs.c
@@ -18,8 +18,8 @@ ssize_t security_show(struct device *dev,
18 * For the test version we need to poll the "hardware" in order 18 * For the test version we need to poll the "hardware" in order
19 * to get the updated status for unlock testing. 19 * to get the updated status for unlock testing.
20 */ 20 */
21 nvdimm->sec.state = nvdimm_security_state(nvdimm, false); 21 nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
22 nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, true); 22 nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER);
23 23
24 switch (nvdimm->sec.state) { 24 switch (nvdimm->sec.state) {
25 case NVDIMM_SECURITY_DISABLED: 25 case NVDIMM_SECURITY_DISABLED:
diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
index bab13dd025a6..0d26b5e3f966 100755
--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
@@ -37,6 +37,10 @@ prerequisite()
37 exit $ksft_skip 37 exit $ksft_skip
38 fi 38 fi
39 39
40 present_cpus=`cat $SYSFS/devices/system/cpu/present`
41 present_max=${present_cpus##*-}
42 echo "present_cpus = $present_cpus present_max = $present_max"
43
40 echo -e "\t Cpus in online state: $online_cpus" 44 echo -e "\t Cpus in online state: $online_cpus"
41 45
42 offline_cpus=`cat $SYSFS/devices/system/cpu/offline` 46 offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
@@ -151,6 +155,8 @@ online_cpus=0
151online_max=0 155online_max=0
152offline_cpus=0 156offline_cpus=0
153offline_max=0 157offline_max=0
158present_cpus=0
159present_max=0
154 160
155while getopts e:ahp: opt; do 161while getopts e:ahp: opt; do
156 case $opt in 162 case $opt in
@@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then
190 online_cpu_expect_success $online_max 196 online_cpu_expect_success $online_max
191 197
192 if [[ $offline_cpus -gt 0 ]]; then 198 if [[ $offline_cpus -gt 0 ]]; then
193 echo -e "\t offline to online to offline: cpu $offline_max" 199 echo -e "\t offline to online to offline: cpu $present_max"
194 online_cpu_expect_success $offline_max 200 online_cpu_expect_success $present_max
195 offline_cpu_expect_success $offline_max 201 offline_cpu_expect_success $present_max
202 online_cpu $present_max
196 fi 203 fi
197 exit 0 204 exit 0
198else 205else
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
index f8d468f54e98..aaa1e9f083c3 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -37,7 +37,7 @@ static int get_debugfs(char **path)
37 struct libmnt_table *tb; 37 struct libmnt_table *tb;
38 struct libmnt_iter *itr = NULL; 38 struct libmnt_iter *itr = NULL;
39 struct libmnt_fs *fs; 39 struct libmnt_fs *fs;
40 int found = 0; 40 int found = 0, ret;
41 41
42 cxt = mnt_new_context(); 42 cxt = mnt_new_context();
43 if (!cxt) 43 if (!cxt)
@@ -58,8 +58,11 @@ static int get_debugfs(char **path)
58 break; 58 break;
59 } 59 }
60 } 60 }
61 if (found) 61 if (found) {
62 asprintf(path, "%s/gpio", mnt_fs_get_target(fs)); 62 ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
63 if (ret < 0)
64 err(EXIT_FAILURE, "failed to format string");
65 }
63 66
64 mnt_free_iter(itr); 67 mnt_free_iter(itr);
65 mnt_free_context(cxt); 68 mnt_free_context(cxt);
diff --git a/tools/testing/selftests/ir/Makefile b/tools/testing/selftests/ir/Makefile
index f4ba8eb84b95..ad06489c22a5 100644
--- a/tools/testing/selftests/ir/Makefile
+++ b/tools/testing/selftests/ir/Makefile
@@ -1,5 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2TEST_PROGS := ir_loopback.sh 2TEST_PROGS := ir_loopback.sh
3TEST_GEN_PROGS_EXTENDED := ir_loopback 3TEST_GEN_PROGS_EXTENDED := ir_loopback
4APIDIR := ../../../include/uapi
5CFLAGS += -Wall -O2 -I$(APIDIR)
4 6
5include ../lib.mk 7include ../lib.mk
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 23022e9d32eb..b52cfdefecbf 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -571,7 +571,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
571 * already exist. 571 * already exist.
572 */ 572 */
573 region = (struct userspace_mem_region *) userspace_mem_region_find( 573 region = (struct userspace_mem_region *) userspace_mem_region_find(
574 vm, guest_paddr, guest_paddr + npages * vm->page_size); 574 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
575 if (region != NULL) 575 if (region != NULL)
576 TEST_ASSERT(false, "overlapping userspace_mem_region already " 576 TEST_ASSERT(false, "overlapping userspace_mem_region already "
577 "exists\n" 577 "exists\n"
@@ -587,15 +587,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
587 region = region->next) { 587 region = region->next) {
588 if (region->region.slot == slot) 588 if (region->region.slot == slot)
589 break; 589 break;
590 if ((guest_paddr <= (region->region.guest_phys_addr
591 + region->region.memory_size))
592 && ((guest_paddr + npages * vm->page_size)
593 >= region->region.guest_phys_addr))
594 break;
595 } 590 }
596 if (region != NULL) 591 if (region != NULL)
597 TEST_ASSERT(false, "A mem region with the requested slot " 592 TEST_ASSERT(false, "A mem region with the requested slot "
598 "or overlapping physical memory range already exists.\n" 593 "already exists.\n"
599 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" 594 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
600 " existing slot: %u paddr: 0x%lx size: 0x%lx", 595 " existing slot: %u paddr: 0x%lx size: 0x%lx",
601 slot, guest_paddr, npages, 596 slot, guest_paddr, npages,
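userspace_mem_region_find() evidently treats its range as inclusive, so the caller must pass the address of the last byte, guest_paddr + npages * page_size - 1; passing the exclusive end made a region that merely abuts the next one look like an overlap. The arithmetic, assuming inclusive [start, end] semantics:

    #include <stdbool.h>
    #include <stdint.h>

    /* Inclusive-range overlap test. */
    static bool overlaps(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
    {
            return s1 <= e2 && s2 <= e1;
    }
    /* Region A: npages at paddr -> [paddr, paddr + npages*psz - 1].
     * With the exclusive end (paddr + npages*psz), a region starting
     * exactly there would wrongly satisfy the test above. */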
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index ea3c73e8f4f6..c49c2a28b0eb 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -103,6 +103,12 @@ int main(int argc, char *argv[])
103 103
104 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 104 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
105 105
106 /* KVM should return supported EVMCS version range */
107 TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
108 (evmcs_ver & 0xff) > 0,
109 "Incorrect EVMCS version range: %x:%x\n",
110 evmcs_ver & 0xff, evmcs_ver >> 8);
111
106 run = vcpu_state(vm, VCPU_ID); 112 run = vcpu_state(vm, VCPU_ID);
107 113
108 vcpu_regs_get(vm, VCPU_ID, &regs1); 114 vcpu_regs_get(vm, VCPU_ID, &regs1);
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index f8f3e90700c0..1e6d14d2825c 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -21,6 +21,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
21KSFT_KHDR_INSTALL := 1 21KSFT_KHDR_INSTALL := 1
22include ../lib.mk 22include ../lib.mk
23 23
24$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma 24$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
25$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread 25$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
26$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread 26$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh
index 8db35b99457c..71d7fdc513c1 100755
--- a/tools/testing/selftests/net/xfrm_policy.sh
+++ b/tools/testing/selftests/net/xfrm_policy.sh
@@ -28,6 +28,19 @@ KEY_AES=0x0123456789abcdef0123456789012345
28SPI1=0x1 28SPI1=0x1
29SPI2=0x2 29SPI2=0x2
30 30
31do_esp_policy() {
32 local ns=$1
33 local me=$2
34 local remote=$3
35 local lnet=$4
36 local rnet=$5
37
38 # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
39 ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow
40 # to fwd decrypted packets after esp processing:
41 ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow
42}
43
31do_esp() { 44do_esp() {
32 local ns=$1 45 local ns=$1
33 local me=$2 46 local me=$2
@@ -40,10 +53,59 @@ do_esp() {
40 ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet 53 ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet
41 ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet 54 ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
42 55
43 # to encrypt packets as they go out (includes forwarded packets that need encapsulation) 56 do_esp_policy $ns $me $remote $lnet $rnet
44 ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow 57}
45 # to fwd decrypted packets after esp processing: 58
46 ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow 59# add policies with different netmasks, to make sure the kernel carries
60# the policies contained within the new netmask over when the search
61# tree is re-built.
62# peer netns whose traffic is supposed to be encapsulated via esp have
63# addresses in the 10.0.1.0/24 and 10.0.2.0/24 subnets, respectively.
64#
65# Adding a policy for '10.0.1.0/23' will make it necessary to
66# alter the prefix of 10.0.1.0 subnet.
67# In case new prefix overlaps with existing node, the node and all
68# policies it carries need to be merged with the existing one(s).
69#
70# Do that here.
71do_overlap()
72{
73 local ns=$1
74
75 # adds new nodes to tree (neither network exists yet in policy database).
76 ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block
77
78 # adds a new node in the 10.0.0.0/24 tree (dst node exists).
79 ip -net $ns xfrm policy add src 10.2.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block
80
81 # adds a 10.2.0.0/23 node, but for different dst.
82 ip -net $ns xfrm policy add src 10.2.0.0/23 dst 10.0.1.0/24 dir fwd priority 200 action block
83
84 # dst now overlaps with the 10.0.1.0/24 ESP policy in fwd.
85 # kernel must 'promote' existing one (10.0.0.0/24) to 10.0.0.0/23.
86 # But 10.0.0.0/23 also includes existing 10.0.1.0/24, so that node
87 # also has to be merged too, including source-sorted subtrees.
88 # old:
89 # 10.0.0.0/24 (node 1 in dst tree of the bin)
90 # 10.1.0.0/24 (node in src tree of dst node 1)
91 # 10.2.0.0/24 (node in src tree of dst node 1)
92 # 10.0.1.0/24 (node 2 in dst tree of the bin)
93 # 10.0.2.0/24 (node in src tree of dst node 2)
94 # 10.2.0.0/24 (node in src tree of dst node 2)
95 #
96 # The next 'policy add' adds dst '10.0.0.0/23', which means
97 # that dst node 1 and dst node 2 have to be merged including
98 # the sub-tree. As no duplicates are allowed, policies in
99 # the two '10.0.2.0/24' are also merged.
100 #
101 # after the 'add', internal search tree should look like this:
102 # 10.0.0.0/23 (node in dst tree of bin)
103 # 10.0.2.0/24 (node in src tree of dst node)
104 # 10.1.0.0/24 (node in src tree of dst node)
105 # 10.2.0.0/24 (node in src tree of dst node)
106 #
107 # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23.
108 ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block
47} 109}
48 110
49do_esp_policy_get_check() { 111do_esp_policy_get_check() {
@@ -160,6 +222,41 @@ check_xfrm() {
160 return $lret 222 return $lret
161} 223}
162 224
225check_exceptions()
226{
227 logpostfix="$1"
228 local lret=0
229
230 # ping to .254 should be excluded from the tunnel (exception is in place).
231 check_xfrm 0 254
232 if [ $? -ne 0 ]; then
233 echo "FAIL: expected ping to .254 to fail ($logpostfix)"
234 lret=1
235 else
236 echo "PASS: ping to .254 bypassed ipsec tunnel ($logpostfix)"
237 fi
238
239 # ping to .253 should use ipsec due to direct policy exception.
240 check_xfrm 1 253
241 if [ $? -ne 0 ]; then
242 echo "FAIL: expected ping to .253 to use ipsec tunnel ($logpostfix)"
243 lret=1
244 else
245 echo "PASS: direct policy matches ($logpostfix)"
246 fi
247
248 # ping to .2 should use ipsec.
249 check_xfrm 1 2
250 if [ $? -ne 0 ]; then
251 echo "FAIL: expected ping to .2 to use ipsec tunnel ($logpostfix)"
252 lret=1
253 else
254 echo "PASS: policy matches ($logpostfix)"
255 fi
256
257 return $lret
258}
259
163#check for needed privileges 260#check for needed privileges
164if [ "$(id -u)" -ne 0 ];then 261if [ "$(id -u)" -ne 0 ];then
165 echo "SKIP: Need root privileges" 262 echo "SKIP: Need root privileges"
@@ -270,33 +367,45 @@ do_exception ns4 10.0.3.10 10.0.3.1 10.0.1.253 10.0.1.240/28
270do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96 367do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96
271do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96 368do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96
272 369
273# ping to .254 should now be excluded from the tunnel 370check_exceptions "exceptions"
274check_xfrm 0 254
275if [ $? -ne 0 ]; then 371if [ $? -ne 0 ]; then
276 echo "FAIL: expected ping to .254 to fail"
277 ret=1 372 ret=1
278else
279 echo "PASS: ping to .254 bypassed ipsec tunnel"
280fi 373fi
281 374
282# ping to .253 should use use ipsec due to direct policy exception. 375# insert block policies with adjacent/overlapping netmasks
283check_xfrm 1 253 376do_overlap ns3
284if [ $? -ne 0 ]; then
285 echo "FAIL: expected ping to .253 to use ipsec tunnel"
286 ret=1
287else
288 echo "PASS: direct policy matches"
289fi
290 377
291# ping to .2 should use ipsec. 378check_exceptions "exceptions and block policies"
292check_xfrm 1 2
293if [ $? -ne 0 ]; then 379if [ $? -ne 0 ]; then
294 echo "FAIL: expected ping to .2 to use ipsec tunnel"
295 ret=1 380 ret=1
296else
297 echo "PASS: policy matches"
298fi 381fi
299 382
383for n in ns3 ns4;do
384 ip -net $n xfrm policy set hthresh4 28 24 hthresh6 126 125
385 sleep $((RANDOM%5))
386done
387
388check_exceptions "exceptions and block policies after hthresh changes"
389
390# full flush of policy db, check everything gets freed incl. internal meta data
391ip -net ns3 xfrm policy flush
392
393do_esp_policy ns3 10.0.3.1 10.0.3.10 10.0.1.0/24 10.0.2.0/24
394do_exception ns3 10.0.3.1 10.0.3.10 10.0.2.253 10.0.2.240/28
395
396# move inexact policies to hash table
397ip -net ns3 xfrm policy set hthresh4 16 16
398
399sleep $((RANDOM%5))
400check_exceptions "exceptions and block policies after hthresh change in ns3"
401
402# restore original hthresh settings -- move policies back to tables
403for n in ns3 ns4;do
404 ip -net $n xfrm policy set hthresh4 32 32 hthresh6 128 128
405 sleep $((RANDOM%5))
406done
407check_exceptions "exceptions and block policies after hthresh change to normal"
408
300for i in 1 2 3 4;do ip netns del ns$i;done 409for i in 1 2 3 4;do ip netns del ns$i;done
301 410
302exit $ret 411exit $ret
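The do_overlap() comments describe the kernel merging policy-tree nodes when a wider prefix subsumes existing ones. The containment rule being exercised is ordinary CIDR subsumption; a small self-contained helper (hypothetical, host byte order) makes it concrete:

    #include <stdbool.h>
    #include <stdint.h>

    /* Does prefix a/alen cover prefix b/blen? */
    static bool prefix_covers(uint32_t a, int alen, uint32_t b, int blen)
    {
            uint32_t mask = alen ? 0xffffffffu << (32 - alen) : 0;
            return alen <= blen && ((a ^ b) & mask) == 0;
    }

    /* prefix_covers(0x0a000000, 23, 0x0a000000, 24) -> true (10.0.0.0/24)
     * prefix_covers(0x0a000000, 23, 0x0a000100, 24) -> true (10.0.1.0/24)
     * Inserting 10.0.0.0/23 therefore forces both /24 dst nodes, and
     * their source subtrees, to merge -- the case the test provokes. */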
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 82121a81681f..29bac5ef9a93 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -10,4 +10,5 @@
10/proc-uptime-002 10/proc-uptime-002
11/read 11/read
12/self 12/self
13/setns-dcache
13/thread-self 14/thread-self
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index 1c12c34cf85d..434d033ee067 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001
14TEST_GEN_PROGS += proc-uptime-002 14TEST_GEN_PROGS += proc-uptime-002
15TEST_GEN_PROGS += read 15TEST_GEN_PROGS += read
16TEST_GEN_PROGS += self 16TEST_GEN_PROGS += self
17TEST_GEN_PROGS += setns-dcache
17TEST_GEN_PROGS += thread-self 18TEST_GEN_PROGS += thread-self
18 19
19include ../lib.mk 20include ../lib.mk
diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
new file mode 100644
index 000000000000..60ab197a73fc
--- /dev/null
+++ b/tools/testing/selftests/proc/setns-dcache.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16/*
17 * Test that setns(CLONE_NEWNET) points to new /proc/net content even
18 * if old one is in dcache.
19 *
20 * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
21 */
22#undef NDEBUG
23#include <assert.h>
24#include <errno.h>
25#include <sched.h>
26#include <signal.h>
27#include <stdio.h>
28#include <stdlib.h>
29#include <string.h>
30#include <unistd.h>
31#include <sys/types.h>
32#include <sys/stat.h>
33#include <fcntl.h>
34#include <sys/socket.h>
35
36static pid_t pid = -1;
37
38static void f(void)
39{
40 if (pid > 0) {
41 kill(pid, SIGTERM);
42 }
43}
44
45int main(void)
46{
47 int fd[2];
48 char _ = 0;
49 int nsfd;
50
51 atexit(f);
52
53 /* Check for privileges and syscall availability straight away. */
54 if (unshare(CLONE_NEWNET) == -1) {
55 if (errno == ENOSYS || errno == EPERM) {
56 return 4;
57 }
58 return 1;
59 }
60 /* Distinguisher between two otherwise empty net namespaces. */
61 if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
62 return 1;
63 }
64
65 if (pipe(fd) == -1) {
66 return 1;
67 }
68
69 pid = fork();
70 if (pid == -1) {
71 return 1;
72 }
73
74 if (pid == 0) {
75 if (unshare(CLONE_NEWNET) == -1) {
76 return 1;
77 }
78
79 if (write(fd[1], &_, 1) != 1) {
80 return 1;
81 }
82
83 pause();
84
85 return 0;
86 }
87
88 if (read(fd[0], &_, 1) != 1) {
89 return 1;
90 }
91
92 {
93 char buf[64];
94 snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
95 nsfd = open(buf, O_RDONLY);
96 if (nsfd == -1) {
97 return 1;
98 }
99 }
100
101 /* Reliably pin dentry into dcache. */
102 (void)open("/proc/net/unix", O_RDONLY);
103
104 if (setns(nsfd, CLONE_NEWNET) == -1) {
105 return 1;
106 }
107
108 kill(pid, SIGTERM);
109 pid = 0;
110
111 {
112 char buf[4096];
113 ssize_t rv;
114 int fd;
115
116 fd = open("/proc/net/unix", O_RDONLY);
117 if (fd == -1) {
118 return 1;
119 }
120
121#define S "Num RefCount Protocol Flags Type St Inode Path\n"
122 rv = read(fd, buf, sizeof(buf));
123
124 assert(rv == strlen(S));
125 assert(memcmp(buf, S, strlen(S)) == 0);
126 }
127
128 return 0;
129}
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index e20b017e7073..b2065536d407 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -145,15 +145,12 @@ TEST_F(rtc, alarm_alm_set) {
145 145
146 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); 146 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
147 ASSERT_NE(-1, rc); 147 ASSERT_NE(-1, rc);
148 EXPECT_NE(0, rc); 148 ASSERT_NE(0, rc);
149 149
150 /* Disable alarm interrupts */ 150 /* Disable alarm interrupts */
151 rc = ioctl(self->fd, RTC_AIE_OFF, 0); 151 rc = ioctl(self->fd, RTC_AIE_OFF, 0);
152 ASSERT_NE(-1, rc); 152 ASSERT_NE(-1, rc);
153 153
154 if (rc == 0)
155 return;
156
157 rc = read(self->fd, &data, sizeof(unsigned long)); 154 rc = read(self->fd, &data, sizeof(unsigned long));
158 ASSERT_NE(-1, rc); 155 ASSERT_NE(-1, rc);
159 TH_LOG("data: %lx", data); 156 TH_LOG("data: %lx", data);
@@ -202,7 +199,109 @@ TEST_F(rtc, alarm_wkalm_set) {
202 199
203 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv); 200 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
204 ASSERT_NE(-1, rc); 201 ASSERT_NE(-1, rc);
205 EXPECT_NE(0, rc); 202 ASSERT_NE(0, rc);
203
204 rc = read(self->fd, &data, sizeof(unsigned long));
205 ASSERT_NE(-1, rc);
206
207 rc = ioctl(self->fd, RTC_RD_TIME, &tm);
208 ASSERT_NE(-1, rc);
209
210 new = timegm((struct tm *)&tm);
211 ASSERT_EQ(new, secs);
212}
213
214TEST_F(rtc, alarm_alm_set_minute) {
215 struct timeval tv = { .tv_sec = 62 };
216 unsigned long data;
217 struct rtc_time tm;
218 fd_set readfds;
219 time_t secs, new;
220 int rc;
221
222 rc = ioctl(self->fd, RTC_RD_TIME, &tm);
223 ASSERT_NE(-1, rc);
224
225 secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec;
226 gmtime_r(&secs, (struct tm *)&tm);
227
228 rc = ioctl(self->fd, RTC_ALM_SET, &tm);
229 if (rc == -1) {
230 ASSERT_EQ(EINVAL, errno);
231 TH_LOG("skip: alarms are not supported.");
232 return;
233 }
234
235 rc = ioctl(self->fd, RTC_ALM_READ, &tm);
236 ASSERT_NE(-1, rc);
237
238 TH_LOG("Alarm time now set to %02d:%02d:%02d.",
239 tm.tm_hour, tm.tm_min, tm.tm_sec);
240
241 /* Enable alarm interrupts */
242 rc = ioctl(self->fd, RTC_AIE_ON, 0);
243 ASSERT_NE(-1, rc);
244
245 FD_ZERO(&readfds);
246 FD_SET(self->fd, &readfds);
247
248 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
249 ASSERT_NE(-1, rc);
250 ASSERT_NE(0, rc);
251
252 /* Disable alarm interrupts */
253 rc = ioctl(self->fd, RTC_AIE_OFF, 0);
254 ASSERT_NE(-1, rc);
255
256 rc = read(self->fd, &data, sizeof(unsigned long));
257 ASSERT_NE(-1, rc);
258 TH_LOG("data: %lx", data);
259
260 rc = ioctl(self->fd, RTC_RD_TIME, &tm);
261 ASSERT_NE(-1, rc);
262
263 new = timegm((struct tm *)&tm);
264 ASSERT_EQ(new, secs);
265}
266
267TEST_F(rtc, alarm_wkalm_set_minute) {
268 struct timeval tv = { .tv_sec = 62 };
269 struct rtc_wkalrm alarm = { 0 };
270 struct rtc_time tm;
271 unsigned long data;
272 fd_set readfds;
273 time_t secs, new;
274 int rc;
275
276 rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
277 ASSERT_NE(-1, rc);
278
279 secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec;
280 gmtime_r(&secs, (struct tm *)&alarm.time);
281
282 alarm.enabled = 1;
283
284 rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
285 if (rc == -1) {
286 ASSERT_EQ(EINVAL, errno);
287 TH_LOG("skip: alarms are not supported.");
288 return;
289 }
290
291 rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
292 ASSERT_NE(-1, rc);
293
294 TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
295 alarm.time.tm_mday, alarm.time.tm_mon + 1,
296 alarm.time.tm_year + 1900, alarm.time.tm_hour,
297 alarm.time.tm_min, alarm.time.tm_sec);
298
299 FD_ZERO(&readfds);
300 FD_SET(self->fd, &readfds);
301
302 rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
303 ASSERT_NE(-1, rc);
304 ASSERT_NE(0, rc);
206 305
207 rc = read(self->fd, &data, sizeof(unsigned long)); 306 rc = read(self->fd, &data, sizeof(unsigned long));
208 ASSERT_NE(-1, rc); 307 ASSERT_NE(-1, rc);
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index fce7f4ce0692..1760b3e39730 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -9,7 +9,7 @@ BINARIES := seccomp_bpf seccomp_benchmark
9CFLAGS += -Wl,-no-as-needed -Wall 9CFLAGS += -Wl,-no-as-needed -Wall
10 10
11seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h 11seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
12 $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@ 12 $(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
13 13
14TEST_PROGS += $(BINARIES) 14TEST_PROGS += $(BINARIES)
15EXTRA_CLEAN := $(BINARIES) 15EXTRA_CLEAN := $(BINARIES)
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 067cb4607d6c..7e632b465ab4 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1608,7 +1608,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
1608#ifdef SYSCALL_NUM_RET_SHARE_REG 1608#ifdef SYSCALL_NUM_RET_SHARE_REG
1609# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action) 1609# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
1610#else 1610#else
1611# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action) 1611# define EXPECT_SYSCALL_RETURN(val, action) \
1612 do { \
1613 errno = 0; \
1614 if (val < 0) { \
1615 EXPECT_EQ(-1, action); \
1616 EXPECT_EQ(-(val), errno); \
1617 } else { \
1618 EXPECT_EQ(val, action); \
1619 } \
1620 } while (0)
1612#endif 1621#endif
1613 1622
1614/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for 1623/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
@@ -1647,7 +1656,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
1647 1656
1648/* Architecture-specific syscall changing routine. */ 1657/* Architecture-specific syscall changing routine. */
1649void change_syscall(struct __test_metadata *_metadata, 1658void change_syscall(struct __test_metadata *_metadata,
1650 pid_t tracee, int syscall) 1659 pid_t tracee, int syscall, int result)
1651{ 1660{
1652 int ret; 1661 int ret;
1653 ARCH_REGS regs; 1662 ARCH_REGS regs;
@@ -1706,7 +1715,7 @@ void change_syscall(struct __test_metadata *_metadata,
1706#ifdef SYSCALL_NUM_RET_SHARE_REG 1715#ifdef SYSCALL_NUM_RET_SHARE_REG
1707 TH_LOG("Can't modify syscall return on this architecture"); 1716 TH_LOG("Can't modify syscall return on this architecture");
1708#else 1717#else
1709 regs.SYSCALL_RET = EPERM; 1718 regs.SYSCALL_RET = result;
1710#endif 1719#endif
1711 1720
1712#ifdef HAVE_GETREGS 1721#ifdef HAVE_GETREGS
@@ -1734,14 +1743,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
1734 case 0x1002: 1743 case 0x1002:
1735 /* change getpid to getppid. */ 1744 /* change getpid to getppid. */
1736 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); 1745 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
1737 change_syscall(_metadata, tracee, __NR_getppid); 1746 change_syscall(_metadata, tracee, __NR_getppid, 0);
1738 break; 1747 break;
1739 case 0x1003: 1748 case 0x1003:
1740 /* skip gettid. */ 1749 /* skip gettid with valid return code. */
1741 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); 1750 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
1742 change_syscall(_metadata, tracee, -1); 1751 change_syscall(_metadata, tracee, -1, 45000);
1743 break; 1752 break;
1744 case 0x1004: 1753 case 0x1004:
1754 /* skip openat with error. */
1755 EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
1756 change_syscall(_metadata, tracee, -1, -ESRCH);
1757 break;
1758 case 0x1005:
1745 /* do nothing (allow getppid) */ 1759 /* do nothing (allow getppid) */
1746 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); 1760 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
1747 break; 1761 break;
@@ -1774,9 +1788,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
1774 nr = get_syscall(_metadata, tracee); 1788 nr = get_syscall(_metadata, tracee);
1775 1789
1776 if (nr == __NR_getpid) 1790 if (nr == __NR_getpid)
1777 change_syscall(_metadata, tracee, __NR_getppid); 1791 change_syscall(_metadata, tracee, __NR_getppid, 0);
1792 if (nr == __NR_gettid)
1793 change_syscall(_metadata, tracee, -1, 45000);
1778 if (nr == __NR_openat) 1794 if (nr == __NR_openat)
1779 change_syscall(_metadata, tracee, -1); 1795 change_syscall(_metadata, tracee, -1, -ESRCH);
1780} 1796}
1781 1797
1782FIXTURE_DATA(TRACE_syscall) { 1798FIXTURE_DATA(TRACE_syscall) {
@@ -1793,8 +1809,10 @@ FIXTURE_SETUP(TRACE_syscall)
1793 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), 1809 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
1794 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), 1810 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
1795 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), 1811 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
1796 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 1812 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
1797 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), 1813 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
1814 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
1815 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
1798 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1816 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
1799 }; 1817 };
1800 1818
@@ -1842,15 +1860,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
1842 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 1860 EXPECT_NE(self->mypid, syscall(__NR_getpid));
1843} 1861}
1844 1862
1845TEST_F(TRACE_syscall, ptrace_syscall_dropped) 1863TEST_F(TRACE_syscall, ptrace_syscall_errno)
1864{
1865 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
1866 teardown_trace_fixture(_metadata, self->tracer);
1867 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
1868 true);
1869
1870 /* Tracer should skip the open syscall, resulting in ESRCH. */
1871 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
1872}
1873
1874TEST_F(TRACE_syscall, ptrace_syscall_faked)
1846{ 1875{
1847 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ 1876 /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
1848 teardown_trace_fixture(_metadata, self->tracer); 1877 teardown_trace_fixture(_metadata, self->tracer);
1849 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, 1878 self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
1850 true); 1879 true);
1851 1880
1852 /* Tracer should skip the open syscall, resulting in EPERM. */ 1881 /* Tracer should skip the gettid syscall, resulting in a fake pid. */
1853 EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat)); 1882 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
1854} 1883}
1855 1884
1856TEST_F(TRACE_syscall, syscall_allowed) 1885TEST_F(TRACE_syscall, syscall_allowed)
@@ -1883,7 +1912,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
1883 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 1912 EXPECT_NE(self->mypid, syscall(__NR_getpid));
1884} 1913}
1885 1914
1886TEST_F(TRACE_syscall, syscall_dropped) 1915TEST_F(TRACE_syscall, syscall_errno)
1916{
1917 long ret;
1918
1919 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
1920 ASSERT_EQ(0, ret);
1921
1922 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
1923 ASSERT_EQ(0, ret);
1924
1925 /* openat has been skipped and an errno value returned. */
1926 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
1927}
1928
1929TEST_F(TRACE_syscall, syscall_faked)
1887{ 1930{
1888 long ret; 1931 long ret;
1889 1932
@@ -1894,8 +1937,7 @@ TEST_F(TRACE_syscall, syscall_dropped)
1894 ASSERT_EQ(0, ret); 1937 ASSERT_EQ(0, ret);
1895 1938
1896 /* gettid has been skipped and an altered return value stored. */ 1939 /* gettid has been skipped and an altered return value stored. */
1897 EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid)); 1940 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
1898 EXPECT_NE(self->mytid, syscall(__NR_gettid));
1899} 1941}
1900 1942
1901TEST_F(TRACE_syscall, skip_after_RET_TRACE) 1943TEST_F(TRACE_syscall, skip_after_RET_TRACE)
@@ -3044,7 +3086,7 @@ TEST(user_notification_basic)
3044 /* Check that the basic notification machinery works */ 3086 /* Check that the basic notification machinery works */
3045 listener = user_trap_syscall(__NR_getpid, 3087 listener = user_trap_syscall(__NR_getpid,
3046 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3088 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3047 EXPECT_GE(listener, 0); 3089 ASSERT_GE(listener, 0);
3048 3090
3049 /* Installing a second listener in the chain should EBUSY */ 3091 /* Installing a second listener in the chain should EBUSY */
3050 EXPECT_EQ(user_trap_syscall(__NR_getpid, 3092 EXPECT_EQ(user_trap_syscall(__NR_getpid,
@@ -3103,7 +3145,7 @@ TEST(user_notification_kill_in_middle)
3103 3145
3104 listener = user_trap_syscall(__NR_getpid, 3146 listener = user_trap_syscall(__NR_getpid,
3105 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3147 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3106 EXPECT_GE(listener, 0); 3148 ASSERT_GE(listener, 0);
3107 3149
3108 /* 3150 /*
3109 * Check that nothing bad happens when we kill the task in the middle 3151 * Check that nothing bad happens when we kill the task in the middle
@@ -3152,7 +3194,7 @@ TEST(user_notification_signal)
3152 3194
3153 listener = user_trap_syscall(__NR_gettid, 3195 listener = user_trap_syscall(__NR_gettid,
3154 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3196 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3155 EXPECT_GE(listener, 0); 3197 ASSERT_GE(listener, 0);
3156 3198
3157 pid = fork(); 3199 pid = fork();
3158 ASSERT_GE(pid, 0); 3200 ASSERT_GE(pid, 0);
@@ -3215,7 +3257,7 @@ TEST(user_notification_closed_listener)
3215 3257
3216 listener = user_trap_syscall(__NR_getpid, 3258 listener = user_trap_syscall(__NR_getpid,
3217 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3259 SECCOMP_FILTER_FLAG_NEW_LISTENER);
3218 EXPECT_GE(listener, 0); 3260 ASSERT_GE(listener, 0);
3219 3261
3220 /* 3262 /*
3221 * Check that we get an ENOSYS when the listener is closed. 3263 * Check that we get an ENOSYS when the listener is closed.
@@ -3376,7 +3418,7 @@ TEST(seccomp_get_notif_sizes)
3376{ 3418{
3377 struct seccomp_notif_sizes sizes; 3419 struct seccomp_notif_sizes sizes;
3378 3420
3379 EXPECT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); 3421 ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
3380 EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif)); 3422 EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif));
3381 EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp)); 3423 EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp));
3382} 3424}
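EXPECT_SYSCALL_RETURN() now distinguishes faked positive results (45000) from injected errors (-ESRCH). The negative branch relies on the kernel's return convention: a syscall returns -errno in the raw ABI, and libc converts that to -1 with errno set. A runnable illustration of the shape the macro checks:

    #include <errno.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            errno = 0;
            /* Probe a pid that cannot exist; the kernel returns -ESRCH
             * and libc presents it as -1 plus errno, which is exactly
             * what the macro's negative-value branch asserts. */
            long ret = syscall(__NR_kill, 999999999, 0);
            return !(ret == -1 && errno == ESRCH);
    }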
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index c02683cfb6c9..7656c7ce79d9 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2CFLAGS += -O3 -Wl,-no-as-needed -Wall 2CFLAGS += -O3 -Wl,-no-as-needed -Wall
3LDFLAGS += -lrt -lpthread -lm 3LDLIBS += -lrt -lpthread -lm
4 4
5# these are all "safe" tests that don't modify 5# these are all "safe" tests that don't modify
6# system time or require escalated privileges 6# system time or require escalated privileges
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index 880b96fc80d4..c0534e298b51 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -25,6 +25,7 @@ struct gup_benchmark {
25 __u64 size; 25 __u64 size;
26 __u32 nr_pages_per_call; 26 __u32 nr_pages_per_call;
27 __u32 flags; 27 __u32 flags;
28 __u64 expansion[10]; /* For future use */
28}; 29};
29 30
30int main(int argc, char **argv) 31int main(int argc, char **argv)
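Adding expansion[10] to struct gup_benchmark is the usual ioctl future-proofing: reserved tail space that current kernels expect to be zero lets the interface grow without a new ioctl number. Usage on the test side, sketched with only the fields visible above:

    struct gup_benchmark gup = { 0 };  /* zero-init covers expansion[] */
    gup.size = 1ULL << 20;
    gup.nr_pages_per_call = 64;
    /* expansion[] stays all-zero, so a future kernel can assign meaning
     * to those words while still accepting binaries built today. */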
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
index 50f7e9272481..bf1bb15b6fbe 100644
--- a/tools/testing/selftests/x86/mpx-mini-test.c
+++ b/tools/testing/selftests/x86/mpx-mini-test.c
@@ -1503,7 +1503,7 @@ exit:
1503 exit(20); 1503 exit(20);
1504 } 1504 }
1505 if (successes != total_nr_tests) { 1505 if (successes != total_nr_tests) {
1506 eprintf("ERROR: succeeded fewer than number of tries (%d != %d)\n", 1506 eprintf("ERROR: succeeded fewer than the number of tries (%d != %d)\n",
1507 successes, total_nr_tests); 1507 successes, total_nr_tests);
1508 exit(21); 1508 exit(21);
1509 } 1509 }
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 460b4bdf4c1e..5d546dcdbc80 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
1133 pkey_assert(err); 1133 pkey_assert(err);
1134} 1134}
1135 1135
1136void become_child(void)
1137{
1138 pid_t forkret;
1139
1140 forkret = fork();
1141 pkey_assert(forkret >= 0);
1142 dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
1143
1144 if (!forkret) {
1145 /* in the child */
1146 return;
1147 }
1148 exit(0);
1149}
1150
1136/* Assumes that all pkeys other than 'pkey' are unallocated */ 1151/* Assumes that all pkeys other than 'pkey' are unallocated */
1137void test_pkey_alloc_exhaust(int *ptr, u16 pkey) 1152void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1138{ 1153{
@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1141 int nr_allocated_pkeys = 0; 1156 int nr_allocated_pkeys = 0;
1142 int i; 1157 int i;
1143 1158
1144 for (i = 0; i < NR_PKEYS*2; i++) { 1159 for (i = 0; i < NR_PKEYS*3; i++) {
1145 int new_pkey; 1160 int new_pkey;
1146 dprintf1("%s() alloc loop: %d\n", __func__, i); 1161 dprintf1("%s() alloc loop: %d\n", __func__, i);
1147 new_pkey = alloc_pkey(); 1162 new_pkey = alloc_pkey();
@@ -1152,21 +1167,27 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1152 if ((new_pkey == -1) && (errno == ENOSPC)) { 1167 if ((new_pkey == -1) && (errno == ENOSPC)) {
1153 dprintf2("%s() failed to allocate pkey after %d tries\n", 1168 dprintf2("%s() failed to allocate pkey after %d tries\n",
1154 __func__, nr_allocated_pkeys); 1169 __func__, nr_allocated_pkeys);
1155 break; 1170 } else {
1171 /*
1172 * Ensure the number of successes never
1173 * exceeds the number of keys supported
1174 * in the hardware.
1175 */
1176 pkey_assert(nr_allocated_pkeys < NR_PKEYS);
1177 allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
1156 } 1178 }
1157 pkey_assert(nr_allocated_pkeys < NR_PKEYS); 1179
1158 allocated_pkeys[nr_allocated_pkeys++] = new_pkey; 1180 /*
1181 * Make sure that allocation state is properly
1182 * preserved across fork().
1183 */
1184 if (i == NR_PKEYS*2)
1185 become_child();
1159 } 1186 }
1160 1187
1161 dprintf3("%s()::%d\n", __func__, __LINE__); 1188 dprintf3("%s()::%d\n", __func__, __LINE__);
1162 1189
1163 /* 1190 /*
1164 * ensure it did not reach the end of the loop without
1165 * failure:
1166 */
1167 pkey_assert(i < NR_PKEYS*2);
1168
1169 /*
1170 * There are 16 pkeys supported in hardware. Three are 1191 * There are 16 pkeys supported in hardware. Three are
1171 * allocated by the time we get here: 1192 * allocated by the time we get here:
1172 * 1. The default key (0) 1193 * 1. The default key (0)
diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c
index 00a26a82fa98..97311333700e 100644
--- a/tools/testing/selftests/x86/unwind_vdso.c
+++ b/tools/testing/selftests/x86/unwind_vdso.c
@@ -44,7 +44,6 @@ int main()
44#include <stdbool.h> 44#include <stdbool.h>
45#include <sys/ptrace.h> 45#include <sys/ptrace.h>
46#include <sys/user.h> 46#include <sys/user.h>
47#include <sys/ucontext.h>
48#include <link.h> 47#include <link.h>
49#include <sys/auxv.h> 48#include <sys/auxv.h>
50#include <dlfcn.h> 49#include <dlfcn.h>
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5ecea812cb6a..585845203db8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3000,8 +3000,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
3000 if (ops->init) 3000 if (ops->init)
3001 ops->init(dev); 3001 ops->init(dev);
3002 3002
3003 kvm_get_kvm(kvm);
3003 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 3004 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
3004 if (ret < 0) { 3005 if (ret < 0) {
3006 kvm_put_kvm(kvm);
3005 mutex_lock(&kvm->lock); 3007 mutex_lock(&kvm->lock);
3006 list_del(&dev->vm_node); 3008 list_del(&dev->vm_node);
3007 mutex_unlock(&kvm->lock); 3009 mutex_unlock(&kvm->lock);
@@ -3009,7 +3011,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
3009 return ret; 3011 return ret;
3010 } 3012 }
3011 3013
3012 kvm_get_kvm(kvm);
3013 cd->fd = ret; 3014 cd->fd = ret;
3014 return 0; 3015 return 0;
3015} 3016}
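Taking the kvm reference before anon_inode_getfd() closes a narrow race: the moment the fd exists, userspace can close it, and the device release path then drops a reference the creating thread had not yet taken, potentially freeing kvm underneath it. The general shape of the fix, as a sketch (pin before publish, unwind on failure):

    kvm_get_kvm(kvm);                /* reference owned by the new fd */
    ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev,
                           O_RDWR | O_CLOEXEC);
    if (ret < 0) {
            kvm_put_kvm(kvm);        /* fd never materialized: unwind */
            /* ... existing device teardown ... */
            return ret;
    }
    /* From here, close(fd) -> release() owns the put. */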