author    David S. Miller <davem@davemloft.net>  2018-03-23 11:24:57 -0400
committer David S. Miller <davem@davemloft.net>  2018-03-23 11:31:58 -0400
commit    03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch)
tree      fbaf8738296b2e9dcba81c6daef2d515b6c4948c
parent    6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff)
parent    f36b7534b83357cf52e747905de6d65b4f7c2512 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...

For the mac80211 stuff, these were fortunately just parallel adds.
Trivially resolved.

In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.

In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.

The mlxsw driver conflict consisted of net-next separating the span
code and definitions into separate files, whilst a 'net' bug fix made
some changes to that moved code.

The mlx5 infiniband conflict resolution was quite non-trivial, the
RDMA tree's merge commit was used as a guide here, and here are their
notes:

====================

Due to bug fixes found by the syzkaller bot and taken into the for-rc
branch after development for the 4.17 merge window had already started
being taken into the for-next branch, there were fairly non-trivial
merge issues that would need to be resolved between the for-rc branch
and the for-next branch. This merge resolves those conflicts and
provides a unified base upon which ongoing development for 4.17 can be
based.

Conflicts:
	drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
	(IB/mlx5: Fix cleanup order on unload) added to for-rc and
	commit b5ca15ad7e61 (IB/mlx5: Add proper representors support)
	added as part of the devel cycle both needed to modify the
	init/de-init functions used by mlx5. To support the new
	representors, the new functions added by the cleanup patch
	needed to be made non-static, and the init/de-init list added
	by the representors patch needed to be modified to match the
	init/de-init list changes made by the cleanup patch.

Updates:
	drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
	prototypes added by representors patch to reflect new function
	names as changed by cleanup patch
	drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
	stage list to match new order from cleanup patch

====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--Documentation/ABI/testing/sysfs-ata171
-rw-r--r--Documentation/ABI/testing/sysfs-block-device58
-rw-r--r--Documentation/ABI/testing/sysfs-class-scsi_host89
-rw-r--r--Documentation/accelerators/ocxl.rst5
-rw-r--r--Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/mv-xor-v2.txt6
-rw-r--r--Documentation/devicetree/bindings/net/dsa/marvell.txt48
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ravb.txt6
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usb3.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usbhs.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt1
-rw-r--r--Documentation/networking/segmentation-offloads.txt18
-rw-r--r--Documentation/sphinx/kerneldoc.py3
-rw-r--r--MAINTAINERS11
-rw-r--r--Makefile11
-rw-r--r--arch/arm64/kernel/cpu_errata.c4
-rw-r--r--arch/arm64/kvm/guest.c3
-rw-r--r--arch/arm64/mm/mmu.c18
-rw-r--r--arch/h8300/include/asm/byteorder.h1
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/Kconfig.platform2
-rw-r--r--arch/microblaze/include/asm/setup.h1
-rw-r--r--arch/microblaze/lib/fastcopy.S4
-rw-r--r--arch/microblaze/mm/init.c64
-rw-r--r--arch/mips/ath25/board.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c8
-rw-r--r--arch/mips/loongson64/Kconfig6
-rw-r--r--arch/parisc/kernel/cache.c41
-rw-r--r--arch/powerpc/boot/Makefile3
-rw-r--r--arch/powerpc/kernel/prom_init.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c69
-rw-r--r--arch/powerpc/kvm/book3s_hv.c17
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S10
-rw-r--r--arch/powerpc/kvm/powerpc.c4
-rw-r--r--arch/s390/include/asm/mmu_context.h1
-rw-r--r--arch/s390/kernel/entry.S10
-rw-r--r--arch/s390/kernel/nospec-branch.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c2
-rw-r--r--arch/sparc/mm/tlb.c19
-rw-r--r--arch/x86/Kconfig11
-rw-r--r--arch/x86/entry/entry_64_compat.S16
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl38
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c16
-rw-r--r--arch/x86/events/intel/uncore_snbep.c2
-rw-r--r--arch/x86/ia32/sys_ia32.c74
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/microcode.h1
-rw-r--r--arch/x86/include/asm/nospec-branch.h5
-rw-r--r--arch/x86/include/asm/pgtable_types.h2
-rw-r--r--arch/x86/include/asm/sections.h1
-rw-r--r--arch/x86/include/asm/sys_ia32.h48
-rw-r--r--arch/x86/include/asm/vmx.h1
-rw-r--r--arch/x86/include/uapi/asm/mce.h1
-rw-r--r--arch/x86/kernel/cpu/intel.c10
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c26
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c34
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c178
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c52
-rw-r--r--arch/x86/kernel/ioport.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c10
-rw-r--r--arch/x86/kernel/signal_compat.c65
-rw-r--r--arch/x86/kernel/vm86_32.c3
-rw-r--r--arch/x86/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/kvm/mmu.c4
-rw-r--r--arch/x86/kvm/vmx.c9
-rw-r--r--arch/x86/mm/fault.c6
-rw-r--r--arch/x86/mm/init_64.c60
-rw-r--r--arch/x86/mm/pgtable.c48
-rw-r--r--arch/x86/mm/pti.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c3
-rw-r--r--drivers/acpi/acpi_watchdog.c4
-rw-r--r--drivers/acpi/battery.c48
-rw-r--r--drivers/acpi/nfit/core.c10
-rw-r--r--drivers/acpi/numa.c10
-rw-r--r--drivers/ata/ahci.c4
-rw-r--r--drivers/ata/libahci.c10
-rw-r--r--drivers/ata/libahci_platform.c2
-rw-r--r--drivers/ata/libata-core.c26
-rw-r--r--drivers/ata/libata-eh.c3
-rw-r--r--drivers/ata/libata-scsi.c12
-rw-r--r--drivers/ata/sata_rcar.c62
-rw-r--r--drivers/auxdisplay/img-ascii-lcd.c6
-rw-r--r--drivers/auxdisplay/panel.c6
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/xen-blkfront.c17
-rw-r--r--drivers/bluetooth/btusb.c8
-rw-r--r--drivers/bluetooth/hci_bcm.c13
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c12
-rw-r--r--drivers/clk/clk-aspeed.c28
-rw-r--r--drivers/clk/clk.c46
-rw-r--r--drivers/clk/hisilicon/clk-hi3660-stub.c2
-rw-r--r--drivers/clk/imx/clk-imx51-imx53.c20
-rw-r--r--drivers/clk/qcom/apcs-msm8916.c5
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c6
-rw-r--r--drivers/clk/ti/clk-33xx.c2
-rw-r--r--drivers/clk/ti/clk-43xx.c2
-rw-r--r--drivers/clk/ti/clkctrl.c2
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/dma/mv_xor_v2.c25
-rw-r--r--drivers/dma/sh/rcar-dmac.c2
-rw-r--r--drivers/firmware/dcdbas.c2
-rw-r--r--drivers/firmware/efi/libstub/tpm.c4
-rw-r--r--drivers/gpio/gpio-rcar.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c50
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c170
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h5
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h4
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c71
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c22
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c40
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c10
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c7
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c10
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c5
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c5
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_dotclock.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c97
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h1
-rw-r--r--drivers/gpu/drm/tegra/dc.c16
-rw-r--r--drivers/gpu/drm/tegra/drm.c1
-rw-r--r--drivers/gpu/drm/tegra/dsi.c1
-rw-r--r--drivers/gpu/drm/tegra/plane.c9
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c39
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c5
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c12
-rw-r--r--drivers/infiniband/core/addr.c15
-rw-r--r--drivers/infiniband/core/cma.c15
-rw-r--r--drivers/infiniband/core/cq.c21
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/sa_query.c7
-rw-r--r--drivers/infiniband/core/ucma.c42
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c28
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c12
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c113
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h12
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c9
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h25
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c10
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c9
-rw-r--r--drivers/infiniband/hw/mlx5/main.c51
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h7
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c14
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c34
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c15
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c19
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c13
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c10
-rw-r--r--drivers/input/keyboard/matrix_keypad.c4
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/touchscreen/mms114.c15
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c13
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c14
-rw-r--r--drivers/md/bcache/super.c27
-rw-r--r--drivers/md/dm-bufio.c16
-rw-r--r--drivers/md/dm-mpath.c77
-rw-r--r--drivers/md/dm-raid.c7
-rw-r--r--drivers/md/dm-table.c16
-rw-r--r--drivers/md/dm.c35
-rw-r--r--drivers/misc/ocxl/file.c27
-rw-r--r--drivers/mmc/core/block.c19
-rw-r--r--drivers/mmc/core/card.h1
-rw-r--r--drivers/mmc/core/quirks.h6
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c8
-rw-r--r--drivers/mmc/host/dw_mmc.c15
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/net/can/cc770/cc770.c100
-rw-r--r--drivers/net/can/cc770/cc770.h2
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c75
-rw-r--r--drivers/net/can/m_can/m_can.c7
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c25
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c8
-rw-r--r--drivers/net/dsa/Makefile5
-rw-r--r--drivers/net/dsa/b53/b53_common.c4
-rw-r--r--drivers/net/ethernet/8390/Kconfig2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c15
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c66
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/ver.h2
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c33
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c180
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c33
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c3
-rw-r--r--drivers/net/ethernet/cortina/gemini.c1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c8
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c42
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c25
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h3
-rw-r--r--drivers/net/ethernet/natsemi/Kconfig6
-rw-r--r--drivers/net/ethernet/natsemi/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c23
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c4
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c3
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c52
-rw-r--r--drivers/net/hyperv/netvsc_drv.c293
-rw-r--r--drivers/net/hyperv/rndis_filter.c68
-rw-r--r--drivers/net/macsec.c5
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.c4
-rw-r--r--drivers/net/phy/marvell.c4
-rw-r--r--drivers/net/phy/micrel.c27
-rw-r--r--drivers/net/phy/phy.c127
-rw-r--r--drivers/net/phy/phy_device.c32
-rw-r--r--drivers/net/phy/realtek.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c26
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/tun.c3
-rw-r--r--drivers/net/usb/usbnet.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c3
-rw-r--r--drivers/nvdimm/blk.c3
-rw-r--r--drivers/nvdimm/btt.c3
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/nvdimm/region_devs.c17
-rw-r--r--drivers/nvme/host/core.c2
-rw-r--r--drivers/nvme/host/fabrics.c5
-rw-r--r--drivers/nvme/host/fc.c27
-rw-r--r--drivers/nvme/host/multipath.c30
-rw-r--r--drivers/nvme/host/nvme.h8
-rw-r--r--drivers/nvme/host/pci.c15
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c2
-rw-r--r--drivers/pci/quirks.c2
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs.c5
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c22
-rw-r--r--drivers/platform/x86/Kconfig28
-rw-r--r--drivers/platform/x86/Makefile5
-rw-r--r--drivers/platform/x86/dell-smbios-base.c (renamed from drivers/platform/x86/dell-smbios.c)31
-rw-r--r--drivers/platform/x86/dell-smbios-smm.c18
-rw-r--r--drivers/platform/x86/dell-smbios-wmi.c14
-rw-r--r--drivers/platform/x86/dell-smbios.h27
-rw-r--r--drivers/platform/x86/dell-wmi.c2
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/stm32-vrefbuf.c2
-rw-r--r--drivers/s390/block/dasd.c21
-rw-r--r--drivers/s390/cio/device_fsm.c7
-rw-r--r--drivers/s390/cio/device_ops.c72
-rw-r--r--drivers/s390/cio/io_sch.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c21
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c33
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c42
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c23
-rw-r--r--drivers/scsi/qedi/qedi_fw.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h5
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c73
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c17
-rw-r--r--drivers/scsi/scsi_error.c5
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/scsi/sd_zbc.c35
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/soc/fsl/qbman/qman.c28
-rw-r--r--drivers/staging/android/ashmem.c23
-rw-r--r--drivers/staging/comedi/drivers.c3
-rw-r--r--drivers/tty/n_tty.c6
-rw-r--r--drivers/tty/serial/8250/8250_pci.c21
-rw-r--r--drivers/tty/serial/atmel_serial.c1
-rw-r--r--drivers/tty/serial/earlycon.c3
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/tty_io.c9
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc2/params.c6
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c1
-rw-r--r--drivers/usb/host/ohci-hcd.c3
-rw-r--r--drivers/usb/host/xhci-dbgcap.c20
-rw-r--r--drivers/usb/host/xhci-dbgtty.c20
-rw-r--r--drivers/usb/host/xhci-pci.c3
-rw-r--r--drivers/usb/host/xhci-plat.c11
-rw-r--r--drivers/usb/host/xhci-rcar.c4
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/host/xhci.h23
-rw-r--r--drivers/usb/mon/mon_text.c126
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/storage/uas.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/typec/fusb302/fusb302.c3
-rw-r--r--drivers/usb/typec/tcpm.c163
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c8
-rw-r--r--drivers/vhost/net.c8
-rw-r--r--drivers/video/fbdev/sbuslib.c4
-rw-r--r--drivers/virtio/virtio_ring.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c3
-rw-r--r--drivers/watchdog/hpwdt.c501
-rw-r--r--drivers/watchdog/sbsa_gwdt.c3
-rw-r--r--drivers/watchdog/wdat_wdt.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c5
-rw-r--r--fs/aio.c44
-rw-r--r--fs/btrfs/backref.c1
-rw-r--r--fs/btrfs/raid56.c1
-rw-r--r--fs/btrfs/sysfs.c8
-rw-r--r--fs/btrfs/transaction.c20
-rw-r--r--fs/dcache.c21
-rw-r--r--fs/gfs2/bmap.c3
-rw-r--r--fs/hugetlbfs/inode.c17
-rw-r--r--fs/namei.c5
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/pnfs.c13
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nfs/write.c83
-rw-r--r--fs/nfsd/nfs4state.c62
-rw-r--r--fs/overlayfs/Kconfig14
-rw-r--r--fs/overlayfs/export.c216
-rw-r--r--fs/overlayfs/inode.c58
-rw-r--r--fs/overlayfs/namei.c6
-rw-r--r--fs/overlayfs/overlayfs.h1
-rw-r--r--fs/overlayfs/super.c1
-rw-r--r--fs/sysfs/symlink.c1
-rw-r--r--fs/xfs/xfs_iomap.c42
-rw-r--r--include/asm-generic/pgtable.h10
-rw-r--r--include/kvm/arm_vgic.h1
-rw-r--r--include/linux/cgroup-defs.h4
-rw-r--r--include/linux/compat.h30
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/if_tun.h4
-rw-r--r--include/linux/if_vlan.h66
-rw-r--r--include/linux/irqchip/arm-gic-v3.h1
-rw-r--r--include/linux/irqchip/arm-gic.h1
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/mlx5/driver.h4
-rw-r--r--include/linux/net.h1
-rw-r--r--include/linux/netfilter/x_tables.h2
-rw-r--r--include/linux/of_pci.h17
-rw-r--r--include/linux/percpu-refcount.h18
-rw-r--r--include/linux/phy.h5
-rw-r--r--include/linux/rhashtable.h4
-rw-r--r--include/linux/skbuff.h22
-rw-r--r--include/linux/tty.h1
-rw-r--r--include/linux/u64_stats_sync.h22
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/workqueue.h1
-rw-r--r--include/net/ip.h11
-rw-r--r--include/net/ip6_route.h3
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/net/mac80211.h4
-rw-r--r--include/net/route.h6
-rw-r--r--include/net/sch_generic.h19
-rw-r--r--include/net/sock.h1
-rw-r--r--include/rdma/ib_verbs.h4
-rw-r--r--include/scsi/scsi_cmnd.h3
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/trace/events/mmc.h4
-rw-r--r--include/uapi/asm-generic/siginfo.h4
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/misc/ocxl.h17
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/cgroup/cgroup.c10
-rw-r--r--kernel/compat.c19
-rw-r--r--kernel/events/core.c4
-rw-r--r--kernel/fail_function.c10
-rw-r--r--kernel/jump_label.c3
-rw-r--r--kernel/locking/rtmutex.c5
-rw-r--r--kernel/memremap.c1
-rw-r--r--kernel/module.c2
-rw-r--r--kernel/panic.c2
-rw-r--r--kernel/sched/core.c15
-rw-r--r--kernel/trace/bpf_trace.c68
-rw-r--r--kernel/workqueue.c10
-rw-r--r--lib/btree.c10
-rw-r--r--lib/bug.c4
-rw-r--r--lib/ioremap.c6
-rw-r--r--lib/percpu-refcount.c2
-rw-r--r--lib/rhashtable.c4
-rw-r--r--lib/test_bpf.c2
-rw-r--r--lib/test_kmod.c2
-rw-r--r--lib/test_rhashtable.c134
-rw-r--r--mm/gup.c7
-rw-r--r--mm/huge_memory.c9
-rw-r--r--mm/hugetlb.c9
-rw-r--r--mm/khugepaged.c15
-rw-r--r--mm/memblock.c28
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/page_alloc.c17
-rw-r--r--mm/percpu-km.c8
-rw-r--r--mm/percpu-vm.c18
-rw-r--r--mm/percpu.c67
-rw-r--r--mm/shmem.c31
-rw-r--r--mm/vmscan.c31
-rw-r--r--net/8021q/vlan_core.c4
-rw-r--r--net/batman-adv/distributed-arp-table.c2
-rw-r--r--net/batman-adv/icmp_socket.c1
-rw-r--r--net/batman-adv/log.c1
-rw-r--r--net/batman-adv/multicast.c4
-rw-r--r--net/batman-adv/routing.c25
-rw-r--r--net/bluetooth/smp.c8
-rw-r--r--net/bridge/netfilter/ebt_among.c34
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/core/dev.c22
-rw-r--r--net/core/dev_ioctl.c7
-rw-r--r--net/core/devlink.c16
-rw-r--r--net/core/filter.c60
-rw-r--r--net/core/skbuff.c11
-rw-r--r--net/core/sock.c21
-rw-r--r--net/core/sock_diag.c12
-rw-r--r--net/dccp/proto.c5
-rw-r--r--net/dsa/legacy.c2
-rw-r--r--net/ieee802154/6lowpan/core.c12
-rw-r--r--net/ipv4/inet_diag.c3
-rw-r--r--net/ipv4/inet_fragment.c3
-rw-r--r--net/ipv4/route.c47
-rw-r--r--net/ipv4/tcp.c1
-rw-r--r--net/ipv4/tcp_timer.c1
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c3
-rw-r--r--net/ipv4/xfrm4_policy.c5
-rw-r--r--net/ipv6/datagram.c21
-rw-r--r--net/ipv6/ip6_gre.c8
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/route.c76
-rw-r--r--net/ipv6/seg6_iptunnel.c7
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c3
-rw-r--r--net/ipv6/xfrm6_policy.c5
-rw-r--r--net/iucv/af_iucv.c4
-rw-r--r--net/kcm/kcmsock.c33
-rw-r--r--net/l2tp/l2tp_core.c46
-rw-r--r--net/l2tp/l2tp_core.h3
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/mlme.c3
-rw-r--r--net/netfilter/nf_tables_api.c1
-rw-r--r--net/netfilter/nft_set_hash.c2
-rw-r--r--net/netfilter/x_tables.c30
-rw-r--r--net/netfilter/xt_hashlimit.c16
-rw-r--r--net/netfilter/xt_recent.c6
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/openvswitch/meter.c12
-rw-r--r--net/sched/act_bpf.c2
-rw-r--r--net/sched/act_csum.c5
-rw-r--r--net/sched/act_ipt.c9
-rw-r--r--net/sched/act_pedit.c2
-rw-r--r--net/sched/act_police.c2
-rw-r--r--net/sched/act_sample.c3
-rw-r--r--net/sched/act_simple.c2
-rw-r--r--net/sched/act_skbmod.c5
-rw-r--r--net/sched/act_tunnel_key.c10
-rw-r--r--net/sched/act_vlan.c5
-rw-r--r--net/sched/sch_generic.c22
-rw-r--r--net/sched/sch_netem.c2
-rw-r--r--net/sctp/input.c8
-rw-r--r--net/sctp/inqueue.c2
-rw-r--r--net/sctp/offload.c2
-rw-r--r--net/smc/af_smc.c4
-rw-r--r--net/smc/smc_close.c25
-rw-r--r--net/socket.c5
-rw-r--r--net/xfrm/xfrm_ipcomp.c2
-rw-r--r--net/xfrm/xfrm_policy.c13
-rw-r--r--net/xfrm/xfrm_replay.c2
-rw-r--r--net/xfrm/xfrm_state.c5
-rw-r--r--net/xfrm/xfrm_user.c21
-rw-r--r--scripts/Makefile.lib8
-rw-r--r--scripts/basic/fixdep.c15
-rwxr-xr-xscripts/bloat-o-meter2
-rw-r--r--sound/core/oss/pcm_oss.c10
-rw-r--r--sound/core/seq/seq_clientmgr.c26
-rw-r--r--sound/core/seq/seq_fifo.c2
-rw-r--r--sound/core/seq/seq_memory.c14
-rw-r--r--sound/core/seq/seq_memory.h3
-rw-r--r--sound/core/seq/seq_prioq.c28
-rw-r--r--sound/core/seq/seq_prioq.h6
-rw-r--r--sound/core/seq/seq_queue.c28
-rw-r--r--sound/pci/hda/hda_intel.c9
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_realtek.c45
-rw-r--r--sound/soc/amd/acp-pcm-dma.c16
-rw-r--r--sound/soc/amd/acp.h2
-rw-r--r--sound/soc/codecs/hdmi-codec.c7
-rw-r--r--sound/soc/codecs/rt5651.c1
-rw-r--r--sound/soc/codecs/sgtl5000.c26
-rw-r--r--sound/soc/codecs/wm_adsp.c14
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c2
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h1
-rw-r--r--tools/bpf/bpftool/common.c4
-rw-r--r--tools/include/uapi/linux/kvm.h2
-rw-r--r--tools/objtool/check.c27
-rw-r--r--tools/perf/Documentation/perf-kallsyms.txt2
-rw-r--r--tools/perf/builtin-record.c9
-rw-r--r--tools/perf/builtin-stat.c2
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/perf.h1
-rw-r--r--tools/perf/ui/browsers/annotate.c25
-rw-r--r--tools/perf/util/auxtrace.c15
-rw-r--r--tools/perf/util/record.c8
-rw-r--r--tools/perf/util/trigger.h9
-rw-r--r--tools/testing/selftests/powerpc/mm/subpage_prot.c14
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-trap.c2
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests25
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c32
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c11
-rw-r--r--virt/kvm/arm/arch_timer.c6
-rw-r--r--virt/kvm/arm/arm.c9
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c3
-rw-r--r--virt/kvm/arm/mmu.c6
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c3
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c11
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c9
-rw-r--r--virt/kvm/arm/vgic/vgic.c87
-rw-r--r--virt/kvm/arm/vgic/vgic.h3
636 files changed, 6274 insertions, 4191 deletions
diff --git a/Documentation/ABI/testing/sysfs-ata b/Documentation/ABI/testing/sysfs-ata
index aa4296498859..9ab0ef1dd1c7 100644
--- a/Documentation/ABI/testing/sysfs-ata
+++ b/Documentation/ABI/testing/sysfs-ata
@@ -1,110 +1,139 @@
 What:		/sys/class/ata_...
-Date:		August 2008
-Contact:	Gwendal Grignou<gwendal@google.com>
 Description:
-
-Provide a place in sysfs for storing the ATA topology of the system. This allows
-retrieving various information about ATA objects.
+		Provide a place in sysfs for storing the ATA topology of the
+		system. This allows retrieving various information about ATA
+		objects.
 
 Files under /sys/class/ata_port
 -------------------------------
 
-	For each port, a directory ataX is created where X is the ata_port_id of
-	the port. The device parent is the ata host device.
+For each port, a directory ataX is created where X is the ata_port_id of the
+port. The device parent is the ata host device.
 
-idle_irq (read)
 
-	Number of IRQ received by the port while idle [some ata HBA only].
+What:		/sys/class/ata_port/ataX/nr_pmp_links
+What:		/sys/class/ata_port/ataX/idle_irq
+Date:		May, 2010
+KernelVersion:	v2.6.37
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		nr_pmp_links:	(RO) If a SATA Port Multiplier (PM) is
+				connected, the number of links behind it.
 
-nr_pmp_links (read)
+		idle_irq:	(RO) Number of IRQ received by the port while
+				idle [some ata HBA only].
 
-	If a SATA Port Multiplier (PM) is connected, number of link behind it.
+
+What:		/sys/class/ata_port/ataX/port_no
+Date:		May, 2013
+KernelVersion:	v3.11
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		(RO) Host local port number. While registering host controller,
+		port numbers are tracked based upon number of ports available on
+		the controller. This attribute is needed by udev for composing
+		persistent links in /dev/disk/by-path.
 
 Files under /sys/class/ata_link
 -------------------------------
 
-	Behind each port, there is a ata_link. If there is a SATA PM in the
-	topology, 15 ata_link objects are created.
-
-	If a link is behind a port, the directory name is linkX, where X is
-	ata_port_id of the port.
-	If a link is behind a PM, its name is linkX.Y where X is ata_port_id
-	of the parent port and Y the PM port.
+Behind each port, there is a ata_link. If there is a SATA PM in the topology, 15
+ata_link objects are created.
 
-hw_sata_spd_limit
+If a link is behind a port, the directory name is linkX, where X is ata_port_id
+of the port. If a link is behind a PM, its name is linkX.Y where X is
+ata_port_id of the parent port and Y the PM port.
 
-	Maximum speed supported by the connected SATA device.
 
-sata_spd_limit
+What:		/sys/class/ata_link/linkX[.Y]/hw_sata_spd_limit
+What:		/sys/class/ata_link/linkX[.Y]/sata_spd_limit
+What:		/sys/class/ata_link/linkX[.Y]/sata_spd
+Date:		May, 2010
+KernelVersion:	v2.6.37
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		hw_sata_spd_limit:	(RO) Maximum speed supported by the
+					connected SATA device.
 
-	Maximum speed imposed by libata.
+		sata_spd_limit:		(RO) Maximum speed imposed by libata.
 
-sata_spd
+		sata_spd:		(RO) Current speed of the link
+					eg. 1.5, 3 Gbps etc.
 
-	Current speed of the link [1.5, 3Gps,...].
 
 Files under /sys/class/ata_device
 ---------------------------------
 
-	Behind each link, up to two ata device are created.
-	The name of the directory is devX[.Y].Z where:
-	- X is ata_port_id of the port where the device is connected,
-	- Y the port of the PM if any, and
-	- Z the device id: for PATA, there is usually 2 devices [0,1],
-	only 1 for SATA.
+Behind each link, up to two ata devices are created.
+The name of the directory is devX[.Y].Z where:
+- X is ata_port_id of the port where the device is connected,
+- Y the port of the PM if any, and
+- Z the device id: for PATA, there is usually 2 devices [0,1], only 1 for SATA.
+
 
-class
-	Device class. Can be "ata" for disk, "atapi" for packet device,
-	"pmp" for PM, or "none" if no device was found behind the link.
-
-dma_mode
+What:		/sys/class/ata_device/devX[.Y].Z/spdn_cnt
+What:		/sys/class/ata_device/devX[.Y].Z/gscr
+What:		/sys/class/ata_device/devX[.Y].Z/ering
+What:		/sys/class/ata_device/devX[.Y].Z/id
+What:		/sys/class/ata_device/devX[.Y].Z/pio_mode
+What:		/sys/class/ata_device/devX[.Y].Z/xfer_mode
+What:		/sys/class/ata_device/devX[.Y].Z/dma_mode
+What:		/sys/class/ata_device/devX[.Y].Z/class
+Date:		May, 2010
+KernelVersion:	v2.6.37
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		spdn_cnt:	(RO) Number of times libata decided to lower the
+				speed of link due to errors.
 
-	Transfer modes supported by the device when in DMA mode.
-	Mostly used by PATA device.
+		gscr:		(RO) Cached result of the dump of PM GSCR
+				register. Valid registers are:
 
-pio_mode
+				0:	SATA_PMP_GSCR_PROD_ID,
+				1:	SATA_PMP_GSCR_REV,
+				2:	SATA_PMP_GSCR_PORT_INFO,
+				32:	SATA_PMP_GSCR_ERROR,
+				33:	SATA_PMP_GSCR_ERROR_EN,
+				64:	SATA_PMP_GSCR_FEAT,
+				96:	SATA_PMP_GSCR_FEAT_EN,
+				130:	SATA_PMP_GSCR_SII_GPIO
 
-	Transfer modes supported by the device when in PIO mode.
-	Mostly used by PATA device.
+				Only valid if the device is a PM.
 
-xfer_mode
+		ering:		(RO) Formatted output of the error ring of the
+				device.
 
-	Current transfer mode.
+		id:		(RO) Cached result of IDENTIFY command, as
+				described in ATA8 7.16 and 7.17. Only valid if
+				the device is not a PM.
 
-id
+		pio_mode:	(RO) Transfer modes supported by the device when
+				in PIO mode. Mostly used by PATA device.
 
-	Cached result of IDENTIFY command, as described in ATA8 7.16 and 7.17.
-	Only valid if the device is not a PM.
+		xfer_mode:	(RO) Current transfer mode
 
-gscr
+		dma_mode:	(RO) Transfer modes supported by the device when
+				in DMA mode. Mostly used by PATA device.
 
-	Cached result of the dump of PM GSCR register.
-	Valid registers are:
-	0: SATA_PMP_GSCR_PROD_ID,
-	1: SATA_PMP_GSCR_REV,
-	2: SATA_PMP_GSCR_PORT_INFO,
-	32: SATA_PMP_GSCR_ERROR,
-	33: SATA_PMP_GSCR_ERROR_EN,
-	64: SATA_PMP_GSCR_FEAT,
-	96: SATA_PMP_GSCR_FEAT_EN,
-	130: SATA_PMP_GSCR_SII_GPIO
-	Only valid if the device is a PM.
+		class:		(RO) Device class. Can be "ata" for disk,
+				"atapi" for packet device, "pmp" for PM, or
+				"none" if no device was found behind the link.
 
-trim
 
-	Shows the DSM TRIM mode currently used by the device. Valid
-	values are:
-	unsupported: Drive does not support DSM TRIM
-	unqueued: Drive supports unqueued DSM TRIM only
-	queued: Drive supports queued DSM TRIM
-	forced_unqueued: Drive's queued DSM support is known to be
-		buggy and only unqueued TRIM commands
-		are sent
+What:		/sys/class/ata_device/devX[.Y].Z/trim
+Date:		May, 2015
+KernelVersion:	v4.10
+Contact:	Gwendal Grignou <gwendal@chromium.org>
+Description:
+		(RO) Shows the DSM TRIM mode currently used by the device. Valid
+		values are:
 
-spdn_cnt
+		unsupported:		Drive does not support DSM TRIM
 
-	Number of time libata decided to lower the speed of link due to errors.
+		unqueued:		Drive supports unqueued DSM TRIM only
 
-ering
+		queued:			Drive supports queued DSM TRIM
 
-	Formatted output of the error ring of the device.
+		forced_unqueued:	Drive's queued DSM support is known to
+					be buggy and only unqueued TRIM commands
+					are sent
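
The restructured ABI layout above is easier to consume programmatically. As a
minimal user-space sketch (not part of this commit), the following reads the
trim attribute of one device; the dev1.0 path is a placeholder for whatever
ataX/devX.Y.Z topology a real system exposes:

    /* Hedged example: device path is hypothetical. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[32];
            ssize_t n;
            int fd = open("/sys/class/ata_device/dev1.0/trim", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                    buf[n] = '\0';
                    /* One of: unsupported, unqueued, queued, forced_unqueued */
                    printf("trim mode: %s", buf);
            }
            close(fd);
            return 0;
    }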
diff --git a/Documentation/ABI/testing/sysfs-block-device b/Documentation/ABI/testing/sysfs-block-device
new file mode 100644
index 000000000000..82ef6eab042d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-block-device
@@ -0,0 +1,58 @@
+What:		/sys/block/*/device/sw_activity
+Date:		Jun, 2008
+KernelVersion:	v2.6.27
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) Used by drivers which support software controlled activity
+		LEDs.
+
+		It has the following valid values:
+
+		0	OFF - the LED is not activated on activity
+		1	BLINK_ON - the LED blinks on every 10ms when activity is
+			detected.
+		2	BLINK_OFF - the LED is on when idle, and blinks off
+			every 10ms when activity is detected.
+
+		Note that the user must turn sw_activity OFF if they wish to
+		control the activity LED via the em_message file.
+
+
+What:		/sys/block/*/device/unload_heads
+Date:		Sep, 2008
+KernelVersion:	v2.6.28
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) Hard disk shock protection
+
+		Writing an integer value to this file will take the heads of the
+		respective drive off the platter and block all I/O operations
+		for the specified number of milliseconds.
+
+		- If the device does not support the unload heads feature,
+		  access is denied with -EOPNOTSUPP.
+		- The maximal value accepted for a timeout is 30000
+		  milliseconds.
+		- A previously set timeout can be cancelled and disk can resume
+		  normal operation immediately by specifying a timeout of 0.
+		- Some hard drives only comply with an earlier version of the
+		  ATA standard, but support the unload feature nonetheless.
+		  There is no safe way Linux can detect these devices, so this
+		  is not enabled by default. If it is known that your device
+		  does support the unload feature, then you can tell the kernel
+		  to enable it by writing -1. It can be disabled again by
+		  writing -2.
+		- Values below -2 are rejected with -EINVAL
+
+		For more information, see
+		Documentation/laptops/disk-shock-protection.txt
+
+
+What:		/sys/block/*/device/ncq_prio_enable
+Date:		Oct, 2016
+KernelVersion:	v4.10
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) Write to the file to turn on or off the SATA ncq (native
+		command queueing) support. By default this feature is turned
+		off.
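
For unload_heads, writing a millisecond count parks the heads. A small sketch
under the semantics documented above, assuming a hypothetical disk exposed as
/sys/block/sda; values above 30000 are rejected and a write of 0 cancels a
pending timeout:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/block/sda/device/unload_heads", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Park heads for 3 seconds; fails with EOPNOTSUPP if the
             * drive does not support the unload heads feature. */
            if (dprintf(fd, "3000\n") < 0)
                    perror("write");
            close(fd);
            return 0;
    }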
diff --git a/Documentation/ABI/testing/sysfs-class-scsi_host b/Documentation/ABI/testing/sysfs-class-scsi_host
index 0eb255e7db12..bafc59fd7b69 100644
--- a/Documentation/ABI/testing/sysfs-class-scsi_host
+++ b/Documentation/ABI/testing/sysfs-class-scsi_host
@@ -27,3 +27,92 @@ Description: This file contains the current status of the "SSD Smart Path"
 		the direct i/o path to physical devices. This setting is
 		controller wide, affecting all configured logical drives on the
 		controller. This file is readable and writable.
+
+What:		/sys/class/scsi_host/hostX/link_power_management_policy
+Date:		Oct, 2007
+KernelVersion:	v2.6.24
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) This parameter allows the user to read and set the link
+		(interface) power management.
+
+		There are four possible options:
+
+		min_power: Tell the controller to try to make the link use the
+		least possible power when possible. This may sacrifice some
+		performance due to increased latency when coming out of lower
+		power states.
+
+		max_performance: Generally, this means no power management.
+		Tell the controller to have performance be a priority over power
+		management.
+
+		medium_power: Tell the controller to enter a lower power state
+		when possible, but do not enter the lowest power state, thus
+		improving latency over min_power setting.
+
+		med_power_with_dipm: Identical to the existing medium_power
+		setting except that it enables dipm (device initiated power
+		management) on top, which makes it match the Windows IRST (Intel
+		Rapid Storage Technology) driver settings. This setting is also
+		close to min_power, except that:
+		a) It does not use host-initiated slumber mode, but it does
+		   allow device-initiated slumber
+		b) It does not enable low power device sleep mode (DevSlp).
+
+What:		/sys/class/scsi_host/hostX/em_message
+What:		/sys/class/scsi_host/hostX/em_message_type
+Date:		Jun, 2008
+KernelVersion:	v2.6.27
+Contact:	linux-ide@vger.kernel.org
+Description:
+		em_message: (RW) Enclosure management support. For the LED
+		protocol, writes and reads correspond to the LED message format
+		as defined in the AHCI spec.
+
+		The user must turn sw_activity (under /sys/block/*/device/) OFF
+		if they wish to control the activity LED via the em_message
+		file.
+
+		em_message_type: (RO) Displays the current enclosure management
+		protocol that is being used by the driver (for eg. LED, SAF-TE,
+		SES-2, SGPIO etc).
+
+What:		/sys/class/scsi_host/hostX/ahci_port_cmd
+What:		/sys/class/scsi_host/hostX/ahci_host_caps
+What:		/sys/class/scsi_host/hostX/ahci_host_cap2
+Date:		Mar, 2010
+KernelVersion:	v2.6.35
+Contact:	linux-ide@vger.kernel.org
+Description:
+		[to be documented]
+
+What:		/sys/class/scsi_host/hostX/ahci_host_version
+Date:		Mar, 2010
+KernelVersion:	v2.6.35
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RO) Display the version of the AHCI spec implemented by the
+		host.
+
+What:		/sys/class/scsi_host/hostX/em_buffer
+Date:		Apr, 2010
+KernelVersion:	v2.6.35
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RW) Allows access to AHCI EM (enclosure management) buffer
+		directly if the host supports EM.
+
+		For eg. the AHCI driver supports SGPIO EM messages but the
+		SATA/AHCI specs do not define the SGPIO message format of the EM
+		buffer. Different hardware (HW) vendors may have different
+		definitions. With the em_buffer attribute, this issue can be
+		solved by allowing HW vendors to provide userland drivers and
+		tools for their SGPIO initiators.
+
+What:		/sys/class/scsi_host/hostX/em_message_supported
+Date:		Oct, 2009
+KernelVersion:	v2.6.39
+Contact:	linux-ide@vger.kernel.org
+Description:
+		(RO) Displays supported enclosure management message types.
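
A short sketch of driving the link_power_management_policy attribute
documented above; host0 and adapter support for the new med_power_with_dipm
policy are assumptions, so treat this as an example rather than a tool:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static const char attr[] =
            "/sys/class/scsi_host/host0/link_power_management_policy";

    int main(void)
    {
            char cur[64];
            ssize_t n;
            int fd = open(attr, O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Select the policy, then read back what the controller took. */
            if (write(fd, "med_power_with_dipm", 19) < 0)
                    perror("write");
            lseek(fd, 0, SEEK_SET);
            n = read(fd, cur, sizeof(cur) - 1);
            if (n > 0) {
                    cur[n] = '\0';
                    printf("policy now: %s", cur);
            }
            close(fd);
            return 0;
    }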
diff --git a/Documentation/accelerators/ocxl.rst b/Documentation/accelerators/ocxl.rst
index 4f7af841d935..ddcc58d01cfb 100644
--- a/Documentation/accelerators/ocxl.rst
+++ b/Documentation/accelerators/ocxl.rst
@@ -152,6 +152,11 @@ OCXL_IOCTL_IRQ_SET_FD:
 	Associate an event fd to an AFU interrupt so that the user process
 	can be notified when the AFU sends an interrupt.
 
+OCXL_IOCTL_GET_METADATA:
+
+	Obtains configuration information from the card, such as the size of
+	MMIO areas, the AFU version, and the PASID for the current context.
+
 
 mmap
 ----
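
A hedged sketch of calling the new ioctl. The struct and field names
(afu_version_major/minor, pasid, pp_mmio_size) follow include/uapi/misc/ocxl.h
as touched by this merge, but verify them against your installed headers; the
AFU device path below is a placeholder for whatever your card exposes:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <misc/ocxl.h>

    int main(void)
    {
            struct ocxl_ioctl_metadata md;
            /* Placeholder path; real AFU names depend on the card. */
            int fd = open("/dev/ocxl/IBM,EXAMPLE.0004:00:00.1.0", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, OCXL_IOCTL_GET_METADATA, &md) < 0) {
                    perror("OCXL_IOCTL_GET_METADATA");
                    close(fd);
                    return 1;
            }
            printf("AFU v%u.%u, PASID %u, per-process MMIO %llu bytes\n",
                   md.afu_version_major, md.afu_version_minor, md.pasid,
                   (unsigned long long)md.pp_mmio_size);
            close(fd);
            return 0;
    }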
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt b/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
index 6394ea9e3b9e..58b12e25bbb1 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_hdmi.txt
@@ -16,6 +16,7 @@ Required properties:
 - ddc: phandle to the hdmi ddc node
 - phy: phandle to the hdmi phy node
 - samsung,syscon-phandle: phandle for system controller node for PMU.
+- #sound-dai-cells: should be 0.
 
 Required properties for Exynos 4210, 4212, 5420 and 5433:
 - clocks: list of clock IDs from SoC clock driver.
diff --git a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
index 217a90eaabe7..9c38bbe7e6d7 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor-v2.txt
@@ -11,7 +11,11 @@ Required properties:
 	interrupts.
 
 Optional properties:
-- clocks: Optional reference to the clock used by the XOR engine.
+- clocks: Optional reference to the clocks used by the XOR engine.
+- clock-names: mandatory if there is a second clock, in this case the
+  name must be "core" for the first clock and "reg" for the second
+  one
+
 
 Example:
 
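
On the driver side, a consumer of this binding would look up the two named
clocks. This is only a sketch of the pattern using the standard clk API (the
real mv_xor_v2 probe also handles the older single unnamed-clock case), not
the actual driver code:

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int xor_clks_get(struct platform_device *pdev)
    {
            struct clk *core, *reg;

            core = devm_clk_get(&pdev->dev, "core");
            if (IS_ERR(core))
                    return PTR_ERR(core);

            reg = devm_clk_get(&pdev->dev, "reg");
            if (IS_ERR(reg))
                    reg = NULL;     /* second clock is optional */

            clk_prepare_enable(core);
            if (reg)
                    clk_prepare_enable(reg);
            return 0;
    }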
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index caf71e2fe24a..60d50a2b0323 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -59,14 +59,15 @@ Example:
 		compatible = "marvell,mv88e6085";
 		reg = <0>;
 		reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
-	};
+
 	mdio {
 		#address-cells = <1>;
 		#size-cells = <0>;
 		switch1phy0: switch1phy0@0 {
 			reg = <0>;
 			interrupt-parent = <&switch0>;
 			interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+		};
 	};
 	};
 };
@@ -83,23 +84,24 @@ Example:
 		compatible = "marvell,mv88e6390";
 		reg = <0>;
 		reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
-	};
+
 	mdio {
 		#address-cells = <1>;
 		#size-cells = <0>;
 		switch1phy0: switch1phy0@0 {
 			reg = <0>;
 			interrupt-parent = <&switch0>;
 			interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+		};
 	};
-	};
 
 	mdio1 {
 		compatible = "marvell,mv88e6xxx-mdio-external";
 		#address-cells = <1>;
 		#size-cells = <0>;
 		switch1phy9: switch1phy0@9 {
 			reg = <9>;
+		};
 	};
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 92fd4b2f17b2..b4dc455eb155 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -27,7 +27,11 @@ Required properties:
 	SoC-specific version corresponding to the platform first followed by
 	the generic version.
 
-- reg: offset and length of (1) the register block and (2) the stream buffer.
+- reg: Offset and length of (1) the register block and (2) the stream buffer.
+       The region for the register block is mandatory.
+       The region for the stream buffer is optional, as it is only present on
+       R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A7796),
+       and M3-N (R8A77965).
 - interrupts: A list of interrupt-specifiers, one for each entry in
 	      interrupt-names.
 	      If interrupt-names is not present, an interrupt specifier
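
Driver-side, the clarified binding means the second memory resource may be
absent on SoCs without a stream buffer. A sketch (not the actual ravb probe)
of a lookup that tolerates that:

    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static int ravb_like_get_regs(struct platform_device *pdev)
    {
            struct resource *regs, *stream;

            regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!regs)
                    return -EINVAL; /* register block is mandatory */

            stream = platform_get_resource(pdev, IORESOURCE_MEM, 1);
            if (!stream)            /* optional: absent on R-Car Gen3 parts
                                     * other than H3/M3-W/M3-N */
                    dev_info(&pdev->dev, "no stream buffer region\n");
            return 0;
    }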
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index e64d903bcbe8..46da5f184460 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -19,7 +19,7 @@ Required properties:
 	configured in FS mode;
   - "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs
 	configured in HS mode;
-  - "st,stm32f7xx-hsotg": The DWC2 USB HS controller instance in STM32F7xx SoCs
+  - "st,stm32f7-hsotg": The DWC2 USB HS controller instance in STM32F7 SoCs
 	configured in HS mode;
 - reg : Should contain 1 register range (address and length)
 - interrupts : Should contain 1 interrupt
diff --git a/Documentation/devicetree/bindings/usb/renesas_usb3.txt b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
index 87a45e2f9b7f..2c071bb5801e 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usb3.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
@@ -4,6 +4,7 @@ Required properties:
 - compatible: Must contain one of the following:
 	- "renesas,r8a7795-usb3-peri"
 	- "renesas,r8a7796-usb3-peri"
+	- "renesas,r8a77965-usb3-peri"
 	- "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible
 	  device
 
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index d060172f1529..43960faf5a88 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -12,6 +12,7 @@ Required properties:
12 - "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device 12 - "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device
13 - "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device 13 - "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
14 - "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device 14 - "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
15 - "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device
15 - "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device 16 - "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
16 - "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device 17 - "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
17 - "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices 18 - "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index e2ea59bbca93..1651483a7048 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -13,6 +13,7 @@ Required properties:
13 - "renesas,xhci-r8a7793" for r8a7793 SoC 13 - "renesas,xhci-r8a7793" for r8a7793 SoC
14 - "renesas,xhci-r8a7795" for r8a7795 SoC 14 - "renesas,xhci-r8a7795" for r8a7795 SoC
15 - "renesas,xhci-r8a7796" for r8a7796 SoC 15 - "renesas,xhci-r8a7796" for r8a7796 SoC
16 - "renesas,xhci-r8a77965" for r8a77965 SoC
16 - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible 17 - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
17 device 18 device
18 - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device 19 - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt
index d47480b61ac6..aca542ec125c 100644
--- a/Documentation/networking/segmentation-offloads.txt
+++ b/Documentation/networking/segmentation-offloads.txt
@@ -20,8 +20,8 @@ TCP Segmentation Offload
 
 TCP segmentation allows a device to segment a single frame into multiple
 frames with a data payload size specified in skb_shinfo()->gso_size.
-When TCP segmentation requested the bit for either SKB_GSO_TCP or
-SKB_GSO_TCP6 should be set in skb_shinfo()->gso_type and
+When TCP segmentation requested the bit for either SKB_GSO_TCPV4 or
+SKB_GSO_TCPV6 should be set in skb_shinfo()->gso_type and
 skb_shinfo()->gso_size should be set to a non-zero value.
 
 TCP segmentation is dependent on support for the use of partial checksum
@@ -153,8 +153,18 @@ To signal this, gso_size is set to the special value GSO_BY_FRAGS.
 
 Therefore, any code in the core networking stack must be aware of the
 possibility that gso_size will be GSO_BY_FRAGS and handle that case
-appropriately. (For size checks, the skb_gso_validate_*_len family of
-helpers do this automatically.)
+appropriately.
+
+There are some helpers to make this easier:
+
+ - skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if
+   an skb is an SCTP GSO skb.
+
+ - For size checks, the skb_gso_validate_*_len family of helpers correctly
+   considers GSO_BY_FRAGS.
+
+ - For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
+   will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
 
 This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
 set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
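
In C, the pattern the updated text recommends looks roughly like the sketch
below. skb_is_gso(), skb_is_gso_sctp() and skb_gso_validate_network_len() are
the in-tree helpers the document names; frame_fits_mtu() is a made-up wrapper
for illustration only:

    #include <linux/skbuff.h>
    #include <net/ip.h>

    static bool frame_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
    {
            if (skb_is_gso(skb)) {
                    /* For SCTP GSO skbs, gso_size is GSO_BY_FRAGS and the
                     * real sizes live in the frag_list; the validate helper
                     * handles that case internally. */
                    if (skb_is_gso_sctp(skb))
                            ; /* nothing extra to do here */
                    return skb_gso_validate_network_len(skb, mtu);
            }
            return skb->len <= mtu;
    }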
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index 39aa9e8697cc..fbedcc39460b 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -36,8 +36,7 @@ import glob
 
 from docutils import nodes, statemachine
 from docutils.statemachine import ViewList
-from docutils.parsers.rst import directives
-from sphinx.util.compat import Directive
+from docutils.parsers.rst import directives, Directive
 from sphinx.ext.autodoc import AutodocReporter
 
 __version__ = '1.0'
diff --git a/MAINTAINERS b/MAINTAINERS
index 214c9bca232a..b3ea844cf228 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9941,6 +9941,13 @@ F: Documentation/ABI/stable/sysfs-bus-nvmem
 F:	include/linux/nvmem-consumer.h
 F:	include/linux/nvmem-provider.h
 
+NXP SGTL5000 DRIVER
+M:	Fabio Estevam <fabio.estevam@nxp.com>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/devicetree/bindings/sound/sgtl5000.txt
+F:	sound/soc/codecs/sgtl5000*
+
 NXP TDA998X DRM DRIVER
 M:	Russell King <linux@armlinux.org.uk>
 S:	Supported
@@ -10343,7 +10350,7 @@ F: drivers/oprofile/
 F:	include/linux/oprofile.h
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
-M:	Mark Fasheh <mfasheh@versity.com>
+M:	Mark Fasheh <mark@fasheh.com>
 M:	Joel Becker <jlbec@evilplan.org>
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:	http://ocfs2.wiki.kernel.org
@@ -10853,6 +10860,7 @@ F: drivers/platform/x86/peaq-wmi.c
 PER-CPU MEMORY ALLOCATOR
 M:	Tejun Heo <tj@kernel.org>
 M:	Christoph Lameter <cl@linux.com>
+M:	Dennis Zhou <dennisszhou@gmail.com>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
 S:	Maintained
 F:	include/linux/percpu*.h
@@ -12123,6 +12131,7 @@ M: Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 F:	sound/soc/samsung/
+F:	Documentation/devicetree/bindings/sound/samsung*
 
 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
 M:	Krzysztof Kozlowski <krzk@kernel.org>
diff --git a/Makefile b/Makefile
index c4322dea3ca2..486db374d1c1 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -826,6 +826,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS	+= $(call cc-option,-fno-strict-overflow)
 
+# clang sets -fmerge-all-constants by default as optimization, but this
+# is non-conforming behavior for C and in fact breaks the kernel, so we
+# need to disable it here generally.
+KBUILD_CFLAGS	+= $(call cc-option,-fno-merge-all-constants)
+
+# for gcc -fno-merge-all-constants disables everything, but it is fine
+# to have actual conforming behavior enabled.
+KBUILD_CFLAGS	+= $(call cc-option,-fmerge-constants)
+
 # Make sure -fstack-check isn't enabled (like gentoo apparently did)
 KBUILD_CFLAGS	+= $(call cc-option,-fno-stack-check,)
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 52f15cd896e1..b5a28336c077 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data)
 	case PSCI_CONDUIT_HVC:
 		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 		cb = call_hvc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_hvc_start;
@@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data)
 	case PSCI_CONDUIT_SMC:
 		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
-		if (res.a0)
+		if ((int)res.a0 < 0)
 			return 0;
 		cb = call_smc_arch_workaround_1;
 		smccc_start = __smccc_workaround_1_smc_start;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index d7e3299a7734..959e50d2588c 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -363,8 +363,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 {
 	int ret = 0;
 
-	vcpu_load(vcpu);
-
 	trace_kvm_set_guest_debug(vcpu, dbg->control);
 
 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
@@ -386,7 +384,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	}
 
 out:
-	vcpu_put(vcpu);
 	return ret;
 }
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 84a019f55022..2dbb2c9f1ec1 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -108,7 +108,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	 * The following mapping attributes may be updated in live
 	 * kernel mappings without the need for break-before-make.
 	 */
-	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
 
 	/* creating or taking down mappings is always safe */
 	if (old == 0 || new == 0)
@@ -118,9 +118,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	if ((old | new) & PTE_CONT)
 		return false;
 
-	/* Transitioning from Global to Non-Global is safe */
-	if (((old ^ new) == PTE_NG) && (new & PTE_NG))
-		return true;
+	/* Transitioning from Non-Global to Global is unsafe */
+	if (old & ~new & PTE_NG)
+		return false;
 
 	return ((old ^ new) & ~mask) == 0;
 }
@@ -972,3 +972,13 @@ int pmd_clear_huge(pmd_t *pmdp)
 	pmd_clear(pmdp);
 	return 1;
 }
+
+int pud_free_pmd_page(pud_t *pud)
+{
+	return pud_none(*pud);
+}
+
+int pmd_free_pte_page(pmd_t *pmd)
+{
+	return pmd_none(*pmd);
+}
diff --git a/arch/h8300/include/asm/byteorder.h b/arch/h8300/include/asm/byteorder.h
index ecff2d1ca5a3..6eaa7ad5fc2c 100644
--- a/arch/h8300/include/asm/byteorder.h
+++ b/arch/h8300/include/asm/byteorder.h
@@ -2,7 +2,6 @@
 #ifndef __H8300_BYTEORDER_H__
 #define __H8300_BYTEORDER_H__
 
-#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
 #include <linux/byteorder/big_endian.h>
 
 #endif
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 4f798aa671dd..3817a3e2146c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -24,6 +24,7 @@ config MICROBLAZE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
+	select NO_BOOTMEM
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_OPROFILE
diff --git a/arch/microblaze/Kconfig.platform b/arch/microblaze/Kconfig.platform
index 6996f397c16c..f7f1739c11b9 100644
--- a/arch/microblaze/Kconfig.platform
+++ b/arch/microblaze/Kconfig.platform
@@ -8,7 +8,6 @@ menu "Platform options"
 
 config OPT_LIB_FUNCTION
 	bool "Optimalized lib function"
-	depends on CPU_LITTLE_ENDIAN
 	default y
 	help
 	  Allows turn on optimalized library function (memcpy and memmove).
@@ -21,6 +20,7 @@ config OPT_LIB_FUNCTION
 config OPT_LIB_ASM
 	bool "Optimalized lib function ASM"
 	depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
+	depends on CPU_BIG_ENDIAN
 	default n
 	help
 	  Allows turn on optimalized library function (memcpy and memmove).
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index be84a4d3917f..7c968c1d1729 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -44,7 +44,6 @@ void machine_shutdown(void);
 void machine_halt(void);
 void machine_power_off(void);
 
-extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
 # endif /* __ASSEMBLY__ */
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index 62021d7e249e..fdc48bb065d8 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -29,10 +29,6 @@
  *	between mem locations with size of xfer spec'd in bytes
  */
 
-#ifdef __MICROBLAZEEL__
-#error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM.
-#endif
-
 #include <linux/linkage.h>
 	.text
 	.globl	memcpy
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 434639f9a3a6..df6de7ccdc2e 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -32,9 +32,6 @@ int mem_init_done;
 #ifndef CONFIG_MMU
 unsigned int __page_offset;
 EXPORT_SYMBOL(__page_offset);
-
-#else
-static int init_bootmem_done;
 #endif /* CONFIG_MMU */
 
 char *klimit = _end;
@@ -117,7 +114,6 @@ static void __init paging_init(void)
 
 void __init setup_memory(void)
 {
-	unsigned long map_size;
 	struct memblock_region *reg;
 
 #ifndef CONFIG_MMU
@@ -174,17 +170,6 @@ void __init setup_memory(void)
 	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
 	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 
-	/*
-	 * Find an area to use for the bootmem bitmap.
-	 * We look for the first area which is at least
-	 * 128kB in length (128kB is enough for a bitmap
-	 * for 4GB of memory, using 4kB pages), plus 1 page
-	 * (in case the address isn't page-aligned).
-	 */
-	map_size = init_bootmem_node(NODE_DATA(0),
-		PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
-	memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
-
 	/* Add active regions with valid PFNs */
 	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
@@ -196,32 +181,9 @@ void __init setup_memory(void)
 			  &memblock.memory, 0);
 	}
 
-	/* free bootmem is whole main memory */
-	free_bootmem_with_active_regions(0, max_low_pfn);
-
-	/* reserve allocate blocks */
-	for_each_memblock(reserved, reg) {
-		unsigned long top = reg->base + reg->size - 1;
-
-		pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
-			 (u32) reg->base, (u32) reg->size, top,
-						memory_start + lowmem_size - 1);
-
-		if (top <= (memory_start + lowmem_size - 1)) {
-			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
-		} else if (reg->base < (memory_start + lowmem_size - 1)) {
-			unsigned long trunc_size = memory_start + lowmem_size -
-								reg->base;
-			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
-		}
-	}
-
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
 
-#ifdef CONFIG_MMU
-	init_bootmem_done = 1;
-#endif
 	paging_init();
 }
 
@@ -398,30 +360,16 @@ asmlinkage void __init mmu_init(void)
 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 {
-	void *p;
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		/*
-		 * Mem start + kernel_tlb -> here is limit
-		 * because of mem mapping from head.S
-		 */
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-				memory_start + kernel_tlb));
-	}
-	return p;
+	/*
+	 * Mem start + kernel_tlb -> here is limit
+	 * because of mem mapping from head.S
+	 */
+	return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+				memory_start + kernel_tlb));
 }
 
 #endif /* CONFIG_MMU */
 
-void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
-{
-	if (mem_init_done)
-		return kmalloc(size, mask);
-	else
-		return alloc_bootmem(size);
-}
-
 void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
index 9ab48ff80c1c..6d11ae581ea7 100644
--- a/arch/mips/ath25/board.c
+++ b/arch/mips/ath25/board.c
@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
 	}
 
 	board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
+	if (!board_data)
+		goto error;
 	ath25_board.config = (struct ath25_boarddata *)board_data;
 	memcpy_fromio(board_data, bcfg, 0x100);
 	if (broken_boarddata) {
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 5b3a3f6a9ad3..d99f5242169e 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2277,6 +2277,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
 	}
 
 	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+	if (!host_data)
+		return -ENOMEM;
 	raw_spin_lock_init(&host_data->lock);
 
 	addr = of_get_address(ciu_node, 0, NULL, NULL);
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 9d41732a9146..159e83add4bb 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -168,11 +168,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
 		return;
 	}
 
-	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-			"smp_ipi0", NULL))
+	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
 		panic("Can't request IPI0 interrupt");
-	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
-			"smp_ipi1", NULL))
+	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
 		panic("Can't request IPI1 interrupt");
 }
 
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig
index bc2fdbfa8223..72af0c183969 100644
--- a/arch/mips/loongson64/Kconfig
+++ b/arch/mips/loongson64/Kconfig
@@ -7,6 +7,8 @@ choice
 config LEMOTE_FULOONG2E
 	bool "Lemote Fuloong(2e) mini-PC"
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select CEVT_R4K
 	select CSRC_R4K
 	select SYS_HAS_CPU_LOONGSON2E
@@ -33,6 +35,8 @@ config LEMOTE_FULOONG2E
 config LEMOTE_MACH2F
 	bool "Lemote Loongson 2F family machines"
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select BOARD_SCACHE
 	select BOOT_ELF32
 	select CEVT_R4K if ! MIPS_EXTERNAL_TIMER
@@ -62,6 +66,8 @@ config LEMOTE_MACH2F
 config LOONGSON_MACH3X
 	bool "Generic Loongson 3 family machines"
 	select ARCH_SPARSEMEM_ENABLE
+	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_MIGHT_HAVE_PC_SERIO
 	select GENERIC_ISA_DMA_SUPPORT_BROKEN
 	select BOOT_ELF32
 	select BOARD_SCACHE
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 79089778725b..e3b45546d589 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -543,7 +543,8 @@ void flush_cache_mm(struct mm_struct *mm)
 	   rp3440, etc. So, avoid it if the mm isn't too big. */
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
-		flush_tlb_all();
+		if (mm->context)
+			flush_tlb_all();
 		flush_cache_all();
 		return;
 	}
@@ -571,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm)
 			pfn = pte_pfn(*ptep);
 			if (!pfn_valid(pfn))
 				continue;
+			if (unlikely(mm->context))
+				flush_tlb_page(vma, addr);
 			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
 		}
 	}
@@ -579,26 +582,46 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
+	pgd_t *pgd;
+	unsigned long addr;
+
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    end - start >= parisc_cache_flush_threshold) {
-		flush_tlb_range(vma, start, end);
+		if (vma->vm_mm->context)
+			flush_tlb_range(vma, start, end);
 		flush_cache_all();
 		return;
 	}
 
-	flush_user_dcache_range_asm(start, end);
-	if (vma->vm_flags & VM_EXEC)
-		flush_user_icache_range_asm(start, end);
-	flush_tlb_range(vma, start, end);
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		flush_tlb_range(vma, start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn)) {
+			if (unlikely(vma->vm_mm->context))
+				flush_tlb_page(vma, addr);
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
 }
 
 void
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
-	BUG_ON(!vma->vm_mm->context);
-
 	if (pfn_valid(pfn)) {
-		flush_tlb_page(vma, vmaddr);
+		if (likely(vma->vm_mm->context))
+			flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index ef6549e57157..26d5d2a5b8e9 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -101,7 +101,8 @@ $(addprefix $(obj)/,$(zlib-y)): \
 libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
 libfdtheader := fdt.h libfdt.h libfdt_internal.h
 
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o \
+	treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
 	$(addprefix $(obj)/,$(libfdtheader))
 
 src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d22c41c26bb3..acf4b2e0530c 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -874,7 +874,6 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
 		.mmu = 0,
 		.hash_ext = 0,
 		.radix_ext = 0,
-		.byte22 = 0,
 	},
 
 	/* option vector 6: IBM PAPR hints */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 0c854816e653..5cb4e4687107 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -195,6 +195,12 @@ static void kvmppc_pte_free(pte_t *ptep)
 	kmem_cache_free(kvm_pte_cache, ptep);
 }
 
+/* Like pmd_huge() and pmd_large(), but works regardless of config options */
+static inline int pmd_is_leaf(pmd_t pmd)
+{
+	return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
 static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 			     unsigned int level, unsigned long mmu_seq)
 {
@@ -219,7 +225,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 	else
 		new_pmd = pmd_alloc_one(kvm->mm, gpa);
 
-	if (level == 0 && !(pmd && pmd_present(*pmd)))
+	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
 		new_ptep = kvmppc_pte_alloc();
 
 	/* Check if we might have been invalidated; let the guest retry if so */
@@ -244,12 +250,30 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
 		new_pmd = NULL;
 	}
 	pmd = pmd_offset(pud, gpa);
-	if (pmd_large(*pmd)) {
-		/* Someone else has instantiated a large page here; retry */
-		ret = -EAGAIN;
-		goto out_unlock;
-	}
-	if (level == 1 && !pmd_none(*pmd)) {
+	if (pmd_is_leaf(*pmd)) {
+		unsigned long lgpa = gpa & PMD_MASK;
+
+		/*
+		 * If we raced with another CPU which has just put
+		 * a 2MB pte in after we saw a pte page, try again.
+		 */
+		if (level == 0 && !new_ptep) {
+			ret = -EAGAIN;
+			goto out_unlock;
+		}
+		/* Valid 2MB page here already, remove it */
+		old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
+					      ~0UL, 0, lgpa, PMD_SHIFT);
+		kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
+		if (old & _PAGE_DIRTY) {
+			unsigned long gfn = lgpa >> PAGE_SHIFT;
+			struct kvm_memory_slot *memslot;
+			memslot = gfn_to_memslot(kvm, gfn);
+			if (memslot && memslot->dirty_bitmap)
+				kvmppc_update_dirty_map(memslot,
+							gfn, PMD_SIZE);
+		}
+	} else if (level == 1 && !pmd_none(*pmd)) {
 		/*
 		 * There's a page table page here, but we wanted
 		 * to install a large page.  Tell the caller and let
@@ -412,28 +436,24 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	} else {
 		page = pages[0];
 		pfn = page_to_pfn(page);
-		if (PageHuge(page)) {
-			page = compound_head(page);
-			pte_size <<= compound_order(page);
+		if (PageCompound(page)) {
+			pte_size <<= compound_order(compound_head(page));
 			/* See if we can insert a 2MB large-page PTE here */
 			if (pte_size >= PMD_SIZE &&
-			    (gpa & PMD_MASK & PAGE_MASK) ==
-			    (hva & PMD_MASK & PAGE_MASK)) {
+			    (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+			    (hva & (PMD_SIZE - PAGE_SIZE))) {
 				level = 1;
 				pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
 			}
 		}
 		/* See if we can provide write access */
 		if (writing) {
-			/*
-			 * We assume gup_fast has set dirty on the host PTE.
-			 */
 			pgflags |= _PAGE_WRITE;
 		} else {
 			local_irq_save(flags);
 			ptep = find_current_mm_pte(current->mm->pgd,
 						   hva, NULL, NULL);
-			if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
+			if (ptep && pte_write(*ptep))
 				pgflags |= _PAGE_WRITE;
 			local_irq_restore(flags);
 		}
@@ -459,18 +479,15 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte = pfn_pte(pfn, __pgprot(pgflags));
 		ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
 	}
-	if (ret == 0 || ret == -EAGAIN)
-		ret = RESUME_GUEST;
 
 	if (page) {
-		/*
-		 * We drop pages[0] here, not page because page might
-		 * have been set to the head page of a compound, but
-		 * we have to drop the reference on the correct tail
-		 * page to match the get inside gup()
-		 */
-		put_page(pages[0]);
+		if (!ret && (pgflags & _PAGE_WRITE))
+			set_page_dirty_lock(page);
+		put_page(page);
 	}
+
+	if (ret == 0 || ret == -EAGAIN)
+		ret = RESUME_GUEST;
 	return ret;
 }
 
@@ -644,7 +661,7 @@ void kvmppc_free_radix(struct kvm *kvm)
 			continue;
 		pmd = pmd_offset(pud, 0);
 		for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
-			if (pmd_huge(*pmd)) {
+			if (pmd_is_leaf(*pmd)) {
 				pmd_clear(pmd);
 				continue;
 			}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 89707354c2ef..9cb9448163c4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2885,7 +2885,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	trace_hardirqs_on();
 
-	guest_enter();
+	guest_enter_irqoff();
 
 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
@@ -2893,8 +2893,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
-	guest_exit();
-
 	trace_hardirqs_off();
 	set_irq_happened(trap);
 
@@ -2937,6 +2935,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	kvmppc_set_host_core(pcpu);
 
 	local_irq_enable();
+	guest_exit();
 
 	/* Let secondaries go back to the offline loop */
 	for (i = 0; i < controlled_threads; ++i) {
@@ -3656,15 +3655,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 		goto up_out;
 
 	psize = vma_kernel_pagesize(vma);
-	porder = __ilog2(psize);
 
 	up_read(&current->mm->mmap_sem);
 
 	/* We can handle 4k, 64k or 16M pages in the VRMA */
-	err = -EINVAL;
-	if (!(psize == 0x1000 || psize == 0x10000 ||
-	      psize == 0x1000000))
-		goto out_srcu;
+	if (psize >= 0x1000000)
+		psize = 0x1000000;
+	else if (psize >= 0x10000)
+		psize = 0x10000;
+	else
+		psize = 0x1000;
+	porder = __ilog2(psize);
 
 	senc = slb_pgsize_encoding(psize);
 	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index f31f357b8c5a..d33264697a31 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -320,7 +320,6 @@ kvm_novcpu_exit:
 	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_commence_exit
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	kvmhv_switch_to_host
 
 /*
@@ -1220,6 +1219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 
 secondary_too_late:
 	li	r12, 0
+	stw	r12, STACK_SLOT_TRAP(r1)
 	cmpdi	r4, 0
 	beq	11f
 	stw	r12, VCPU_TRAP(r4)
@@ -1558,12 +1558,12 @@ mc_cont:
 3:	stw	r5,VCPU_SLB_MAX(r9)
 
 guest_bypass:
+	stw	r12, STACK_SLOT_TRAP(r1)
 	mr	r3, r12
 	/* Increment exit count, poke other threads to exit */
 	bl	kvmhv_commence_exit
 	nop
 	ld	r9, HSTATE_KVM_VCPU(r13)
-	lwz	r12, VCPU_TRAP(r9)
 
 	/* Stop others sending VCPU interrupts to this physical CPU */
 	li	r0, -1
@@ -1898,6 +1898,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
  * POWER7/POWER8 guest -> host partition switch code.
  * We don't have to lock against tlbies but we do
  * have to coordinate the hardware threads.
+ * Here STACK_SLOT_TRAP(r1) contains the trap number.
  */
 kvmhv_switch_to_host:
 	/* Secondary threads wait for primary to do partition switch */
@@ -1950,12 +1951,12 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	/* If HMI, call kvmppc_realmode_hmi_handler() */
+	lwz	r12, STACK_SLOT_TRAP(r1)
 	cmpwi	r12, BOOK3S_INTERRUPT_HMI
 	bne	27f
 	bl	kvmppc_realmode_hmi_handler
 	nop
 	cmpdi	r3, 0
-	li	r12, BOOK3S_INTERRUPT_HMI
 	/*
 	 * At this point kvmppc_realmode_hmi_handler may have resync-ed
 	 * the TB, and if it has, we must not subtract the guest timebase
@@ -2008,10 +2009,8 @@ BEGIN_FTR_SECTION
 	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
 	cmpwi	r8, 0
 	beq	47f
-	stw	r12, STACK_SLOT_TRAP(r1)
 	bl	kvmhv_p9_restore_lpcr
 	nop
-	lwz	r12, STACK_SLOT_TRAP(r1)
 	b	48f
 47:
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -2049,6 +2048,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
 
+	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
 	ld	r0, SFS+PPC_LR_STKOFF(r1)
 	addi	r1, r1, SFS
 	mtlr	r0
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 403e642c78f5..52c205373986 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1345,7 +1345,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
 int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		unsigned int rt, int is_default_endian)
 {
-	enum emulation_result emulated;
+	enum emulation_result emulated = EMULATE_DONE;
 
 	while (vcpu->arch.mmio_vmx_copy_nums) {
 		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
@@ -1608,7 +1608,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	kvm_sigset_deactivate(vcpu);
 
+#ifdef CONFIG_ALTIVEC
 out:
+#endif
 	vcpu_put(vcpu);
 	return r;
 }
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 65154eaa3714..6c8ce15cde7b 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -63,6 +63,7 @@ static inline int init_new_context(struct task_struct *tsk,
 			_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
 		/* pgd_alloc() did not account this pmd */
 		mm_inc_nr_pmds(mm);
+		mm_inc_nr_puds(mm);
 	}
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 13a133a6015c..a5621ea6d123 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -14,6 +14,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 #include <asm/ctl_reg.h>
+#include <asm/dwarf.h>
 #include <asm/errno.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
@@ -230,7 +231,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 	.hidden	\name
 	.type	\name,@function
 \name:
-	.cfi_startproc
+	CFI_STARTPROC
 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
 	exrl	0,0f
 #else
@@ -239,7 +240,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
 #endif
 	j	.
 0:	br	\reg
-	.cfi_endproc
+	CFI_ENDPROC
 	.endm
 
 	GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
@@ -426,13 +427,13 @@ ENTRY(system_call)
 	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
 	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	stmg	%r0,%r7,__PT_R0(%r11)
-	# clear user controlled register to prevent speculative use
-	xgr	%r0,%r0
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg	%r14,__PT_FLAGS(%r11)
 .Lsysc_do_svc:
+	# clear user controlled register to prevent speculative use
+	xgr	%r0,%r0
 	# load address of system call table
 	lg	%r10,__THREAD_sysc_table(%r13,%r12)
 	llgh	%r8,__PT_INT_CODE+2(%r11)
@@ -1439,6 +1440,7 @@ cleanup_critical:
 	stg	%r15,__LC_SYSTEM_TIMER
 0:	# update accounting time stamp
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+	BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
 	# set up saved register r11
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r9,STACK_FRAME_OVERHEAD(%r15)
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index 69d7fcf48158..9aff72d3abda 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -2,8 +2,8 @@
 #include <linux/module.h>
 #include <asm/nospec-branch.h>
 
-int nospec_call_disable = IS_ENABLED(EXPOLINE_OFF);
-int nospec_return_disable = !IS_ENABLED(EXPOLINE_FULL);
+int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL);
 
 static int __init nospectre_v2_setup_early(char *str)
 {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 77d7818130db..339ac0964590 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
 	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
 	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+	{ "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
 	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
 	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
 	{ "instruction_gs", VCPU_STAT(instruction_gs) },
@@ -2146,6 +2147,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
 		/* we still need the basic sca for the ipte control */
 		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+		return;
 	}
 	read_lock(&vcpu->kvm->arch.sca_lock);
 	if (vcpu->kvm->arch.use_esca) {
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 847ddffbf38a..b5cfab711651 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -163,13 +163,10 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
 	pte_unmap(pte);
 }
 
-void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-		pmd_t *pmdp, pmd_t pmd)
-{
-	pmd_t orig = *pmdp;
-
-	*pmdp = pmd;
 
+static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
+			   pmd_t orig, pmd_t pmd)
+{
 	if (mm == &init_mm)
 		return;
 
@@ -219,6 +216,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	}
 }
 
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+	__set_pmd_acct(mm, addr, orig, pmd);
+}
+
 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmdp, pmd_t pmd)
 {
@@ -227,6 +233,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 	do {
 		old = *pmdp;
 	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+	__set_pmd_acct(vma->vm_mm, address, old, pmd);
 
 	return old;
 }
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index eb7f43f23521..0fa71a78ec99 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2307,7 +2307,7 @@ choice
 	  it can be used to assist security vulnerability exploitation.
 
 	  This setting can be changed at boot time via the kernel command
-	  line parameter vsyscall=[native|emulate|none].
+	  line parameter vsyscall=[emulate|none].
 
 	  On a system with recent enough glibc (2.14 or newer) and no
 	  static binaries, you can say None without a performance penalty
@@ -2315,15 +2315,6 @@ choice
 
 	  If unsure, select "Emulate".
 
-	config LEGACY_VSYSCALL_NATIVE
-		bool "Native"
-		help
-			Actual executable code is located in the fixed vsyscall
-			address mapping, implementing time() efficiently. Since
-			this makes the mapping executable, it can be used during
-			security vulnerability exploitation (traditionally as
-			ROP gadgets). This configuration is not recommended.
-
 	config LEGACY_VSYSCALL_EMULATE
 		bool "Emulate"
 		help
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e811dd9c5e99..08425c42f8b7 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -363,9 +363,7 @@ ENTRY(entry_INT80_compat)
 	pushq	2*8(%rdi)		/* regs->ip */
 	pushq	1*8(%rdi)		/* regs->orig_ax */
 
-	movq	(%rdi), %rdi		/* restore %rdi */
-
-	pushq	%rdi			/* pt_regs->di */
+	pushq	(%rdi)			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
 	pushq	%rdx			/* pt_regs->dx */
 	pushq	%rcx			/* pt_regs->cx */
@@ -406,15 +404,3 @@ ENTRY(entry_INT80_compat)
 	TRACE_IRQS_ON
 	jmp	swapgs_restore_regs_and_return_to_usermode
 END(entry_INT80_compat)
-
-ENTRY(stub32_clone)
-	/*
-	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
-	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
-	 *
-	 * The native 64-bit kernel's sys_clone() implements the latter,
-	 * so we need to swap arguments here before calling it:
-	 */
-	xchg	%r8, %rcx
-	jmp	sys_clone
-ENDPROC(stub32_clone)
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 448ac2161112..2a5e99cff859 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -8,12 +8,12 @@
 #
 0	i386	restart_syscall		sys_restart_syscall
 1	i386	exit			sys_exit
-2	i386	fork			sys_fork			sys_fork
+2	i386	fork			sys_fork
 3	i386	read			sys_read
 4	i386	write			sys_write
 5	i386	open			sys_open			compat_sys_open
 6	i386	close			sys_close
-7	i386	waitpid			sys_waitpid			sys32_waitpid
+7	i386	waitpid			sys_waitpid			compat_sys_x86_waitpid
 8	i386	creat			sys_creat
 9	i386	link			sys_link
 10	i386	unlink			sys_unlink
@@ -78,7 +78,7 @@
 69	i386	ssetmask		sys_ssetmask
 70	i386	setreuid		sys_setreuid16
 71	i386	setregid		sys_setregid16
-72	i386	sigsuspend		sys_sigsuspend			sys_sigsuspend
+72	i386	sigsuspend		sys_sigsuspend
 73	i386	sigpending		sys_sigpending			compat_sys_sigpending
 74	i386	sethostname		sys_sethostname
 75	i386	setrlimit		sys_setrlimit			compat_sys_setrlimit
@@ -96,7 +96,7 @@
 87	i386	swapon			sys_swapon
 88	i386	reboot			sys_reboot
 89	i386	readdir			sys_old_readdir			compat_sys_old_readdir
-90	i386	mmap			sys_old_mmap			sys32_mmap
+90	i386	mmap			sys_old_mmap			compat_sys_x86_mmap
 91	i386	munmap			sys_munmap
 92	i386	truncate		sys_truncate			compat_sys_truncate
 93	i386	ftruncate		sys_ftruncate			compat_sys_ftruncate
@@ -126,7 +126,7 @@
 117	i386	ipc			sys_ipc				compat_sys_ipc
 118	i386	fsync			sys_fsync
 119	i386	sigreturn		sys_sigreturn			sys32_sigreturn
-120	i386	clone			sys_clone			stub32_clone
+120	i386	clone			sys_clone			compat_sys_x86_clone
 121	i386	setdomainname		sys_setdomainname
 122	i386	uname			sys_newuname
 123	i386	modify_ldt		sys_modify_ldt
@@ -186,8 +186,8 @@
 177	i386	rt_sigtimedwait		sys_rt_sigtimedwait		compat_sys_rt_sigtimedwait
 178	i386	rt_sigqueueinfo		sys_rt_sigqueueinfo		compat_sys_rt_sigqueueinfo
 179	i386	rt_sigsuspend		sys_rt_sigsuspend
-180	i386	pread64			sys_pread64			sys32_pread
-181	i386	pwrite64		sys_pwrite64			sys32_pwrite
+180	i386	pread64			sys_pread64			compat_sys_x86_pread
+181	i386	pwrite64		sys_pwrite64			compat_sys_x86_pwrite
 182	i386	chown			sys_chown16
 183	i386	getcwd			sys_getcwd
 184	i386	capget			sys_capget
@@ -196,14 +196,14 @@
 187	i386	sendfile		sys_sendfile			compat_sys_sendfile
 188	i386	getpmsg
 189	i386	putpmsg
-190	i386	vfork			sys_vfork			sys_vfork
+190	i386	vfork			sys_vfork
 191	i386	ugetrlimit		sys_getrlimit			compat_sys_getrlimit
 192	i386	mmap2			sys_mmap_pgoff
-193	i386	truncate64		sys_truncate64			sys32_truncate64
-194	i386	ftruncate64		sys_ftruncate64			sys32_ftruncate64
-195	i386	stat64			sys_stat64			sys32_stat64
-196	i386	lstat64			sys_lstat64			sys32_lstat64
-197	i386	fstat64			sys_fstat64			sys32_fstat64
+193	i386	truncate64		sys_truncate64			compat_sys_x86_truncate64
+194	i386	ftruncate64		sys_ftruncate64			compat_sys_x86_ftruncate64
+195	i386	stat64			sys_stat64			compat_sys_x86_stat64
+196	i386	lstat64			sys_lstat64			compat_sys_x86_lstat64
+197	i386	fstat64			sys_fstat64			compat_sys_x86_fstat64
 198	i386	lchown32		sys_lchown
 199	i386	getuid32		sys_getuid
 200	i386	getgid32		sys_getgid
@@ -231,7 +231,7 @@
 # 222 is unused
 # 223 is unused
 224	i386	gettid			sys_gettid
-225	i386	readahead		sys_readahead			sys32_readahead
+225	i386	readahead		sys_readahead			compat_sys_x86_readahead
 226	i386	setxattr		sys_setxattr
 227	i386	lsetxattr		sys_lsetxattr
 228	i386	fsetxattr		sys_fsetxattr
@@ -256,7 +256,7 @@
 247	i386	io_getevents		sys_io_getevents		compat_sys_io_getevents
 248	i386	io_submit		sys_io_submit			compat_sys_io_submit
 249	i386	io_cancel		sys_io_cancel
-250	i386	fadvise64		sys_fadvise64			sys32_fadvise64
+250	i386	fadvise64		sys_fadvise64			compat_sys_x86_fadvise64
 # 251 is available for reuse (was briefly sys_set_zone_reclaim)
 252	i386	exit_group		sys_exit_group
 253	i386	lookup_dcookie		sys_lookup_dcookie		compat_sys_lookup_dcookie
@@ -278,7 +278,7 @@
 269	i386	fstatfs64		sys_fstatfs64			compat_sys_fstatfs64
 270	i386	tgkill			sys_tgkill
 271	i386	utimes			sys_utimes			compat_sys_utimes
-272	i386	fadvise64_64		sys_fadvise64_64		sys32_fadvise64_64
+272	i386	fadvise64_64		sys_fadvise64_64		compat_sys_x86_fadvise64_64
 273	i386	vserver
 274	i386	mbind			sys_mbind
 275	i386	get_mempolicy		sys_get_mempolicy		compat_sys_get_mempolicy
@@ -306,7 +306,7 @@
 297	i386	mknodat			sys_mknodat
 298	i386	fchownat		sys_fchownat
 299	i386	futimesat		sys_futimesat			compat_sys_futimesat
-300	i386	fstatat64		sys_fstatat64			sys32_fstatat
+300	i386	fstatat64		sys_fstatat64			compat_sys_x86_fstatat
 301	i386	unlinkat		sys_unlinkat
 302	i386	renameat		sys_renameat
 303	i386	linkat			sys_linkat
@@ -320,7 +320,7 @@
 311	i386	set_robust_list		sys_set_robust_list		compat_sys_set_robust_list
 312	i386	get_robust_list		sys_get_robust_list		compat_sys_get_robust_list
 313	i386	splice			sys_splice
-314	i386	sync_file_range		sys_sync_file_range		sys32_sync_file_range
+314	i386	sync_file_range		sys_sync_file_range		compat_sys_x86_sync_file_range
 315	i386	tee			sys_tee
 316	i386	vmsplice		sys_vmsplice			compat_sys_vmsplice
 317	i386	move_pages		sys_move_pages			compat_sys_move_pages
@@ -330,7 +330,7 @@
 321	i386	signalfd		sys_signalfd			compat_sys_signalfd
 322	i386	timerfd_create		sys_timerfd_create
 323	i386	eventfd			sys_eventfd
-324	i386	fallocate		sys_fallocate			sys32_fallocate
+324	i386	fallocate		sys_fallocate			compat_sys_x86_fallocate
 325	i386	timerfd_settime		sys_timerfd_settime		compat_sys_timerfd_settime
 326	i386	timerfd_gettime		sys_timerfd_gettime		compat_sys_timerfd_gettime
 327	i386	signalfd4		sys_signalfd4			compat_sys_signalfd4
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 577fa8adb785..8560ef68a9d6 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -42,10 +42,8 @@
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
-#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
-	NATIVE;
-#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
+static enum { EMULATE, NONE } vsyscall_mode =
+#ifdef CONFIG_LEGACY_VSYSCALL_NONE
 	NONE;
 #else
 	EMULATE;
@@ -56,8 +54,6 @@ static int __init vsyscall_setup(char *str)
 	if (str) {
 		if (!strcmp("emulate", str))
 			vsyscall_mode = EMULATE;
-		else if (!strcmp("native", str))
-			vsyscall_mode = NATIVE;
 		else if (!strcmp("none", str))
 			vsyscall_mode = NONE;
 		else
@@ -139,10 +135,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
 	WARN_ON_ONCE(address != regs->ip);
 
-	/* This should be unreachable in NATIVE mode. */
-	if (WARN_ON(vsyscall_mode == NATIVE))
-		return false;
-
 	if (vsyscall_mode == NONE) {
 		warn_bad_vsyscall(KERN_INFO, regs,
 				  "vsyscall attempted with vsyscall=none");
@@ -370,9 +362,7 @@ void __init map_vsyscall(void)
 
 	if (vsyscall_mode != NONE) {
 		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-			     vsyscall_mode == NATIVE
-			     ? PAGE_KERNEL_VSYSCALL
-			     : PAGE_KERNEL_VVAR);
+			     PAGE_KERNEL_VVAR);
 		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
 	}
 
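
With NATIVE gone, vsyscall_setup() is left parsing just two mode strings. A userspace sketch of the same string-to-enum parse (illustrative stand-in, not the kernel function; main() and return conventions are invented):

#include <stdio.h>
#include <string.h>

enum vsyscall_mode { EMULATE, NONE };

static int parse_mode(const char *str, enum vsyscall_mode *mode)
{
	if (!strcmp("emulate", str))
		*mode = EMULATE;
	else if (!strcmp("none", str))
		*mode = NONE;
	else
		return -1;	/* the kernel warns and keeps the default */
	return 0;
}

int main(void)
{
	enum vsyscall_mode mode;

	printf("emulate -> %d\n", parse_mode("emulate", &mode));
	printf("native  -> %d (now rejected)\n", parse_mode("native", &mode));
	return 0;
}
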
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 6d8044ab1060..22ec65bc033a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3606,7 +3606,7 @@ static struct intel_uncore_type skx_uncore_imc = {
 };
 
 static struct attribute *skx_upi_uncore_formats_attr[] = {
-	&format_attr_event_ext.attr,
+	&format_attr_event.attr,
 	&format_attr_umask_ext.attr,
 	&format_attr_edge.attr,
 	&format_attr_inv.attr,
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 96cd33bbfc85..6512498bbef6 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -51,15 +51,14 @@
 #define AA(__x) ((unsigned long)(__x))
 
 
-asmlinkage long sys32_truncate64(const char __user *filename,
-				 unsigned long offset_low,
-				 unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 	return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
 }
 
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
-				  unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 	return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
 }
@@ -96,8 +95,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
 	return 0;
 }
 
-asmlinkage long sys32_stat64(const char __user *filename,
-			     struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_stat(filename, &stat);
@@ -107,8 +106,8 @@ asmlinkage long sys32_stat64(const char __user *filename,
 	return ret;
 }
 
-asmlinkage long sys32_lstat64(const char __user *filename,
-			      struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_lstat(filename, &stat);
@@ -117,7 +116,8 @@ asmlinkage long sys32_lstat64(const char __user *filename,
 	return ret;
 }
 
-asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_fstat(fd, &stat);
@@ -126,8 +126,9 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
 	return ret;
 }
 
-asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
-			      struct stat64 __user *statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd,
+		       const char __user *, filename,
+		       struct stat64 __user *, statbuf, int, flag)
 {
 	struct kstat stat;
 	int error;
@@ -153,7 +154,7 @@ struct mmap_arg_struct32 {
 	unsigned int offset;
 };
 
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
+COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
 {
 	struct mmap_arg_struct32 a;
 
@@ -167,22 +168,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
 			       a.offset>>PAGE_SHIFT);
 }
 
-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
-			      int options)
+COMPAT_SYSCALL_DEFINE3(x86_waitpid, compat_pid_t, pid, unsigned int __user *,
+		       stat_addr, int, options)
 {
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
 }
 
 /* warning: next two assume little endian */
-asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
-			    u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 	return sys_pread64(fd, ubuf, count,
 			 ((loff_t)AA(poshi) << 32) | AA(poslo));
 }
 
-asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
-			     u32 count, u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 	return sys_pwrite64(fd, ubuf, count,
 			  ((loff_t)AA(poshi) << 32) | AA(poslo));
@@ -193,8 +194,9 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
  * Some system calls that need sign extended arguments. This could be
  * done by a generic wrapper.
  */
-long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
-			__u32 len_low, __u32 len_high, int advice)
+COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low,
+		       __u32, offset_high, __u32, len_low, __u32, len_high,
+		       int, advice)
 {
 	return sys_fadvise64_64(fd,
 				(((u64)offset_high)<<32) | offset_low,
@@ -202,31 +204,43 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
 				advice);
 }
 
-asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
-				   size_t count)
+COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
+		       unsigned int, off_hi, size_t, count)
 {
 	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
 }
 
-asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
-				      unsigned n_low, unsigned n_hi, int flags)
+COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low,
+		       unsigned int, off_hi, unsigned int, n_low,
+		       unsigned int, n_hi, int, flags)
 {
 	return sys_sync_file_range(fd,
 				   ((u64)off_hi << 32) | off_low,
 				   ((u64)n_hi << 32) | n_low, flags);
 }
 
-asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
-				size_t len, int advice)
+COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
+		       unsigned int, offset_hi, size_t, len, int, advice)
 {
 	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
 				len, advice);
 }
 
-asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
-				unsigned offset_hi, unsigned len_lo,
-				unsigned len_hi)
+COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
+		       unsigned int, offset_lo, unsigned int, offset_hi,
+		       unsigned int, len_lo, unsigned int, len_hi)
 {
 	return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
 			     ((u64)len_hi << 32) | len_lo);
 }
+
+/*
+ * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS
+ */
+COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
+		       unsigned long, newsp, int __user *, parent_tidptr,
+		       unsigned long, tls_val, int __user *, child_tidptr)
+{
+	return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr,
+			 tls_val);
+}
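
The compat wrappers above all reduce to the same trick: a 32-bit caller passes a 64-bit offset in two registers, and the wrapper glues the halves back together before calling the native syscall. A minimal userspace sketch of that recombination (values invented for illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint64_t combine(uint32_t lo, uint32_t hi)
{
	/* Same expression the wrappers use: ((u64)hi << 32) | lo */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t lo = 0x89abcdef, hi = 0x01234567;

	printf("0x%016llx\n", (unsigned long long)combine(lo, hi));
	/* prints 0x0123456789abcdef */
	return 0;
}
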
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f41079da38c5..d554c11e01ff 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -316,6 +316,7 @@
 #define X86_FEATURE_VPCLMULQDQ		(16*32+10) /* Carry-Less Multiplication Double Quadword */
 #define X86_FEATURE_AVX512_VNNI		(16*32+11) /* Vector Neural Network Instructions */
 #define X86_FEATURE_AVX512_BITALG	(16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_TME			(16*32+13) /* Intel Total Memory Encryption */
 #define X86_FEATURE_AVX512_VPOPCNTDQ	(16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57		(16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID		(16*32+22) /* RDPID instruction */
@@ -328,6 +329,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
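
The X86_FEATURE_* values above pack a CPUID capability word and a bit position into one integer as (word*32 + bit), so a single number both names the feature and locates its bit in the capability bitmap. A small stand-alone sketch of decoding that packing (the main() harness is invented):

#include <stdio.h>

#define X86_FEATURE_TME     (16*32 + 13)	/* from the hunk above */
#define X86_FEATURE_PCONFIG (18*32 + 18)

int main(void)
{
	int features[] = { X86_FEATURE_TME, X86_FEATURE_PCONFIG };

	for (int i = 0; i < 2; i++)
		printf("feature %d -> word %d, bit %d\n",
		       features[i], features[i] / 32, features[i] % 32);
	return 0;
}
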
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 7fb1047d61c7..6cf0e4cb7b97 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -39,6 +39,7 @@ struct device;
 
 enum ucode_state {
 	UCODE_OK	= 0,
+	UCODE_NEW,
 	UCODE_UPDATED,
 	UCODE_NFOUND,
 	UCODE_ERROR,
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index d0dabeae0505..f928ad9b143f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -183,7 +183,10 @@
  * otherwise we'll run out of registers. We don't care about CET
  * here, anyway.
  */
-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",	\
+# define CALL_NOSPEC						\
+	ALTERNATIVE(						\
+	ANNOTATE_RETPOLINE_SAFE					\
+	"call *%[thunk_target]\n",				\
 	" jmp 904f;\n"						\
 	" .align 16\n"						\
 	"901: call 903f;\n"					\
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 246f15b4e64c..acfe755562a6 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -174,7 +174,6 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
-#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -206,7 +205,6 @@ enum page_cache_mode {
 #define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_VSYSCALL	__pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
 #define PAGE_KERNEL_VVAR	__pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
 
 #define PAGE_KERNEL_IO		__pgprot(__PAGE_KERNEL_IO)
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index d6baf23782bc..5c019d23d06b 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];
 
 #if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
+extern char __entry_trampoline_start[], __entry_trampoline_end[];
 #endif
 
 #endif	/* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 82c34ee25a65..906794aa034e 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -20,31 +20,43 @@
 #include <asm/ia32.h>
 
 /* ia32/sys_ia32.c */
-asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
-asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
+asmlinkage long compat_sys_x86_truncate64(const char __user *, unsigned long,
+					  unsigned long);
+asmlinkage long compat_sys_x86_ftruncate64(unsigned int, unsigned long,
+					   unsigned long);
 
-asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *);
-asmlinkage long sys32_fstatat(unsigned int, const char __user *,
+asmlinkage long compat_sys_x86_stat64(const char __user *,
+				      struct stat64 __user *);
+asmlinkage long compat_sys_x86_lstat64(const char __user *,
+				       struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstat64(unsigned int, struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstatat(unsigned int, const char __user *,
 			      struct stat64 __user *, int);
 struct mmap_arg_struct32;
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
+asmlinkage long compat_sys_x86_mmap(struct mmap_arg_struct32 __user *);
 
-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+asmlinkage long compat_sys_x86_waitpid(compat_pid_t, unsigned int __user *,
+				       int);
 
-asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
-asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
+asmlinkage long compat_sys_x86_pread(unsigned int, char __user *, u32, u32,
+				     u32);
+asmlinkage long compat_sys_x86_pwrite(unsigned int, const char __user *, u32,
+				      u32, u32);
 
-long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
-long sys32_vm86_warning(void);
+asmlinkage long compat_sys_x86_fadvise64_64(int, __u32, __u32, __u32, __u32,
+					    int);
 
-asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
-asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
-				      unsigned, unsigned, int);
-asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
-asmlinkage long sys32_fallocate(int, int, unsigned,
-				unsigned, unsigned, unsigned);
+asmlinkage ssize_t compat_sys_x86_readahead(int, unsigned int, unsigned int,
+					    size_t);
+asmlinkage long compat_sys_x86_sync_file_range(int, unsigned int, unsigned int,
+					       unsigned int, unsigned int,
+					       int);
+asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int,
+					 size_t, int);
+asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int,
+					 unsigned int, unsigned int);
+asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *,
+				     unsigned long, int __user *);
 
 /* ia32/ia32_signal.c */
 asmlinkage long sys32_sigreturn(void);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 8b6780751132..5db8b0b10766 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -352,6 +352,7 @@ enum vmcs_field {
 #define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR		(4 << 8) /* software interrupt */
+#define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 91723461dc1f..435db58a7bad 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -30,6 +30,7 @@ struct mce {
 	__u64 synd;	/* MCA_SYND MSR: only valid on SMCA systems */
 	__u64 ipid;	/* MCA_IPID MSR: only valid on SMCA systems */
 	__u64 ppin;	/* Protected Processor Inventory Number */
+	__u32 microcode;/* Microcode revision */
 };
 
 #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index d19e903214b4..c3af167d0a70 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
 /*
  * Early microcode releases for the Spectre v2 mitigation were broken.
  * Information taken from;
- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
  * - https://kb.vmware.com/s/article/52345
  * - Microcode revisions observed in the wild
  * - Release note from 20180108 microcode release
@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
 	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
 	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
 	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
-	{ INTEL_FAM6_SKYLAKE_DESKTOP,	0x03,	0xc2 },
 	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
 	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
 	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
@@ -144,6 +143,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 {
 	int i;
 
+	/*
+	 * We know that the hypervisor lie to us on the microcode version so
+	 * we may as well hope that it is running the correct version.
+	 */
+	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+		return false;
+
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
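
bad_spectre_microcode() is a linear scan of a (model, stepping, revision) table, now short-circuited by the hypervisor check because the reported microcode version cannot be trusted under a hypervisor. A userspace sketch of the table scan with made-up entries, assuming (as the kernel table does) that each entry records a revision known to be bad:

#include <stdbool.h>
#include <stdio.h>

struct sku_microcode {
	unsigned char model;
	unsigned char stepping;
	unsigned int  microcode;	/* a revision known to be bad */
};

static const struct sku_microcode bad[] = {
	{ 0x4e, 0x03, 0xc2 },		/* illustrative values only */
	{ 0x55, 0x04, 0x0200003c },
};

static bool is_bad(unsigned char model, unsigned char stepping,
		   unsigned int rev)
{
	for (unsigned i = 0; i < sizeof(bad) / sizeof(bad[0]); i++)
		if (model == bad[i].model && stepping == bad[i].stepping)
			return rev <= bad[i].microcode;
	return false;
}

int main(void)
{
	printf("%d\n", is_bad(0x4e, 0x03, 0xc2));	/* 1: known bad */
	printf("%d\n", is_bad(0x4e, 0x03, 0xc6));	/* 0: newer rev */
	return 0;
}
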
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8ff94d1e2dce..466f47301334 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -56,6 +56,9 @@
 
 static DEFINE_MUTEX(mce_log_mutex);
 
+/* sysfs synchronization */
+static DEFINE_MUTEX(mce_sysfs_mutex);
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
@@ -130,6 +133,8 @@ void mce_setup(struct mce *m)
 
 	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
 		rdmsrl(MSR_PPIN, m->ppin);
+
+	m->microcode = boot_cpu_data.microcode;
 }
 
 DEFINE_PER_CPU(struct mce, injectm);
@@ -262,7 +267,7 @@ static void __print_mce(struct mce *m)
 	 */
 	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
-		cpu_data(m->extcpu).microcode);
+		m->microcode);
 }
 
 static void print_mce(struct mce *m)
@@ -2086,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s,
 	if (kstrtou64(buf, 0, &new) < 0)
 		return -EINVAL;
 
+	mutex_lock(&mce_sysfs_mutex);
 	if (mca_cfg.ignore_ce ^ !!new) {
 		if (new) {
 			/* disable ce features */
@@ -2098,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s,
 			on_each_cpu(mce_enable_ce, (void *)1, 1);
 		}
 	}
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return size;
 }
 
@@ -2110,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s,
 	if (kstrtou64(buf, 0, &new) < 0)
 		return -EINVAL;
 
+	mutex_lock(&mce_sysfs_mutex);
 	if (mca_cfg.cmci_disabled ^ !!new) {
 		if (new) {
 			/* disable cmci */
@@ -2121,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s,
 			on_each_cpu(mce_enable_ce, NULL, 1);
 		}
 	}
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return size;
 }
 
@@ -2128,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s,
 					     struct device_attribute *attr,
 					     const char *buf, size_t size)
 {
-	ssize_t ret = device_store_int(s, attr, buf, size);
+	unsigned long old_check_interval = check_interval;
+	ssize_t ret = device_store_ulong(s, attr, buf, size);
+
+	if (check_interval == old_check_interval)
+		return ret;
+
+	if (check_interval < 1)
+		check_interval = 1;
+
+	mutex_lock(&mce_sysfs_mutex);
 	mce_restart();
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return ret;
 }
 
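
The new mce_sysfs_mutex exists because each of these sysfs handlers does a read-modify-write of shared configuration and then pokes every CPU; two concurrent writers could interleave those steps. A userspace analogue of the pattern with pthreads (the ignore_ce toggle and thread harness are invented for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_mutex = PTHREAD_MUTEX_INITIALIZER;
static int ignore_ce;

static void set_ignore_ce(int new)
{
	pthread_mutex_lock(&cfg_mutex);
	if (ignore_ce != !!new) {	/* same toggle test as the handler */
		ignore_ce = !!new;
		/* ...enable or disable CE features would happen here... */
	}
	pthread_mutex_unlock(&cfg_mutex);
}

static void *writer(void *arg)
{
	for (int i = 0; i < 100000; i++)
		set_ignore_ce((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, writer, (void *)0L);
	pthread_create(&b, NULL, writer, (void *)1L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("ignore_ce = %d\n", ignore_ce);
	return 0;
}
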
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index a998e1a7d46f..48179928ff38 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 		return -EINVAL;
 
 	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
-	if (ret != UCODE_OK)
+	if (ret > UCODE_UPDATED)
 		return -EINVAL;
 
 	return 0;
@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 static enum ucode_state
 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
+	struct ucode_patch *p;
 	enum ucode_state ret;
 
 	/* free old equiv table */
 	free_equiv_cpu_table();
 
 	ret = __load_microcode_amd(family, data, size);
-
-	if (ret != UCODE_OK)
+	if (ret != UCODE_OK) {
 		cleanup();
+		return ret;
+	}
 
-#ifdef CONFIG_X86_32
-	/* save BSP's matching patch for early load */
-	if (save) {
-		struct ucode_patch *p = find_patch(0);
-		if (p) {
-			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
-			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
-			       PATCH_MAX_SIZE));
-		}
+	p = find_patch(0);
+	if (!p) {
+		return ret;
+	} else {
+		if (boot_cpu_data.microcode == p->patch_id)
+			return ret;
+
+		ret = UCODE_NEW;
 	}
-#endif
+
+	/* save BSP's matching patch for early load */
+	if (!save)
+		return ret;
+
+	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+	memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
+
 	return ret;
 }
 
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index aa1b9a422f2b..10c4fc2c91f8 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -22,13 +22,16 @@
 #define pr_fmt(fmt) "microcode: " fmt
 
 #include <linux/platform_device.h>
+#include <linux/stop_machine.h>
 #include <linux/syscore_ops.h>
 #include <linux/miscdevice.h>
 #include <linux/capability.h>
 #include <linux/firmware.h>
 #include <linux/kernel.h>
+#include <linux/delay.h>
 #include <linux/mutex.h>
 #include <linux/cpu.h>
+#include <linux/nmi.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
 
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
  */
 static DEFINE_MUTEX(microcode_mutex);
 
+/*
+ * Serialize late loading so that CPUs get updated one-by-one.
+ */
+static DEFINE_SPINLOCK(update_lock);
+
 struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
 
 struct cpu_info_ctx {
@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
 	return ret;
 }
 
-struct apply_microcode_ctx {
-	enum ucode_state err;
-};
-
 static void apply_microcode_local(void *arg)
 {
-	struct apply_microcode_ctx *ctx = arg;
+	enum ucode_state *err = arg;
 
-	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
+	*err = microcode_ops->apply_microcode(smp_processor_id());
 }
 
 static int apply_microcode_on_target(int cpu)
 {
-	struct apply_microcode_ctx ctx = { .err = 0 };
+	enum ucode_state err;
 	int ret;
 
-	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
-	if (!ret)
-		ret = ctx.err;
-
+	ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
+	if (!ret) {
+		if (err == UCODE_ERROR)
+			ret = 1;
+	}
 	return ret;
 }
 
@@ -489,19 +494,114 @@ static void __exit microcode_dev_exit(void)
 /* fake device for request_firmware */
 static struct platform_device	*microcode_pdev;
 
-static enum ucode_state reload_for_cpu(int cpu)
+/*
+ * Late loading dance. Why the heavy-handed stomp_machine effort?
+ *
+ * - HT siblings must be idle and not execute other code while the other sibling
+ *   is loading microcode in order to avoid any negative interactions caused by
+ *   the loading.
+ *
+ * - In addition, microcode update on the cores must be serialized until this
+ *   requirement can be relaxed in the future. Right now, this is conservative
+ *   and good.
+ */
+#define SPINUNIT 100 /* 100 nsec */
+
+static int check_online_cpus(void)
 {
-	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-	enum ucode_state ustate;
+	if (num_online_cpus() == num_present_cpus())
+		return 0;
 
-	if (!uci->valid)
-		return UCODE_OK;
+	pr_err("Not all CPUs online, aborting microcode update.\n");
+
+	return -EINVAL;
+}
+
+static atomic_t late_cpus_in;
+static atomic_t late_cpus_out;
+
+static int __wait_for_cpus(atomic_t *t, long long timeout)
+{
+	int all_cpus = num_online_cpus();
+
+	atomic_inc(t);
 
-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
-	if (ustate != UCODE_OK)
-		return ustate;
+	while (atomic_read(t) < all_cpus) {
+		if (timeout < SPINUNIT) {
+			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+			       all_cpus - atomic_read(t));
+			return 1;
+		}
 
-	return apply_microcode_on_target(cpu);
+		ndelay(SPINUNIT);
+		timeout -= SPINUNIT;
+
+		touch_nmi_watchdog();
+	}
+	return 0;
+}
+
+/*
+ * Returns:
+ * < 0 - on error
+ *   0 - no update done
+ *   1 - microcode was updated
+ */
+static int __reload_late(void *info)
+{
+	int cpu = smp_processor_id();
+	enum ucode_state err;
+	int ret = 0;
+
+	/*
+	 * Wait for all CPUs to arrive. A load will not be attempted unless all
+	 * CPUs show up.
+	 * */
+	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
+		return -1;
+
+	spin_lock(&update_lock);
+	apply_microcode_local(&err);
+	spin_unlock(&update_lock);
+
+	if (err > UCODE_NFOUND) {
+		pr_warn("Error reloading microcode on CPU %d\n", cpu);
+		return -1;
+	/* siblings return UCODE_OK because their engine got updated already */
+	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
+		ret = 1;
+	} else {
+		return ret;
+	}
+
+	/*
+	 * Increase the wait timeout to a safe value here since we're
+	 * serializing the microcode update and that could take a while on a
+	 * large number of CPUs. And that is fine as the *actual* timeout will
+	 * be determined by the last CPU finished updating and thus cut short.
+	 */
+	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
+		panic("Timeout during microcode update!\n");
+
+	return ret;
+}
+
+/*
+ * Reload microcode late on all CPUs. Wait for a sec until they
+ * all gather together.
+ */
+static int microcode_reload_late(void)
+{
+	int ret;
+
+	atomic_set(&late_cpus_in,  0);
+	atomic_set(&late_cpus_out, 0);
+
+	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+	if (ret > 0)
+		microcode_check();
+
+	return ret;
 }
 
 static ssize_t reload_store(struct device *dev,
@@ -509,10 +609,9 @@ static ssize_t reload_store(struct device *dev,
 			    const char *buf, size_t size)
 {
 	enum ucode_state tmp_ret = UCODE_OK;
-	bool do_callback = false;
+	int bsp = boot_cpu_data.cpu_index;
 	unsigned long val;
 	ssize_t ret = 0;
-	int cpu;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret)
@@ -521,29 +620,24 @@ static ssize_t reload_store(struct device *dev,
 	if (val != 1)
 		return size;
 
-	get_online_cpus();
-	mutex_lock(&microcode_mutex);
-	for_each_online_cpu(cpu) {
-		tmp_ret = reload_for_cpu(cpu);
-		if (tmp_ret > UCODE_NFOUND) {
-			pr_warn("Error reloading microcode on CPU %d\n", cpu);
-
-			/* set retval for the first encountered reload error */
-			if (!ret)
-				ret = -EINVAL;
-		}
+	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+	if (tmp_ret != UCODE_NEW)
+		return size;
 
-		if (tmp_ret == UCODE_UPDATED)
-			do_callback = true;
-	}
+	get_online_cpus();
 
-	if (!ret && do_callback)
-		microcode_check();
+	ret = check_online_cpus();
+	if (ret)
+		goto put;
 
+	mutex_lock(&microcode_mutex);
+	ret = microcode_reload_late();
 	mutex_unlock(&microcode_mutex);
+
+put:
 	put_online_cpus();
 
-	if (!ret)
+	if (ret >= 0)
 		ret = size;
 
 	return ret;
@@ -611,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
 	if (system_state != SYSTEM_RUNNING)
 		return UCODE_NFOUND;
 
-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
-						     refresh_fw);
-
-	if (ustate == UCODE_OK) {
+	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
+	if (ustate == UCODE_NEW) {
 		pr_debug("CPU%d updated upon init\n", cpu);
 		apply_microcode_on_target(cpu);
 	}
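
The heart of the late-loading rework is the two-counter rendezvous: every CPU increments late_cpus_in and spins until all have arrived, performs the serialized update, then does the same on late_cpus_out so nobody resumes early. A compilable userspace analogue with C11 atomics and pthreads (the kernel adds timeouts and NMI-watchdog kicks; the thread count and names here are invented):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int late_in, late_out;

static void wait_for_all(atomic_int *t)
{
	atomic_fetch_add(t, 1);
	while (atomic_load(t) < NTHREADS)
		;	/* the kernel ndelay()s and checks a timeout here */
}

static void *reload(void *arg)
{
	wait_for_all(&late_in);		/* nobody starts until all arrive */
	/* ...the serialized update would happen here, under a lock... */
	wait_for_all(&late_out);	/* nobody leaves until all are done */
	printf("thread %ld done\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t th[NTHREADS];

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&th[i], NULL, reload, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(th[i], NULL);
	return 0;
}
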
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 923054a6b760..32b8e5724f96 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 	if (!mc)
 		return 0;
 
+	/*
+	 * Save us the MSR write below - which is a particular expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();
+
 	/* write microcode via MSR 0x79 */
 	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 
@@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 
 static enum ucode_state apply_microcode_intel(int cpu)
 {
+	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct microcode_intel *mc;
-	struct ucode_cpu_info *uci;
-	struct cpuinfo_x86 *c;
 	static int prev_rev;
 	u32 rev;
 
@@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
 	if (WARN_ON(raw_smp_processor_id() != cpu))
 		return UCODE_ERROR;
 
-	uci = ucode_cpu_info + cpu;
-	mc = uci->mc;
+	/* Look for a newer patch in our cache: */
+	mc = find_patch(uci);
 	if (!mc) {
-		/* Look for a newer patch in our cache: */
-		mc = find_patch(uci);
+		mc = uci->mc;
 		if (!mc)
 			return UCODE_NFOUND;
 	}
 
+	/*
+	 * Save us the MSR write below - which is a particular expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		c->microcode = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();
+
 	/* write microcode via MSR 0x79 */
 	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 
@@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
 		prev_rev = rev;
 	}
 
-	c = &cpu_data(cpu);
-
 	uci->cpu_sig.rev = rev;
 	c->microcode = rev;
 
@@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	unsigned int leftover = size;
 	unsigned int curr_mc_size = 0, new_mc_size = 0;
 	unsigned int csig, cpf;
+	enum ucode_state ret = UCODE_OK;
 
 	while (leftover) {
 		struct microcode_header_intel mc_header;
@@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			new_mc  = mc;
 			new_mc_size = mc_size;
 			mc = NULL;	/* trigger new vmalloc */
+			ret = UCODE_NEW;
 		}
 
 		ucode_ptr += mc_size;
@@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 		 cpu, new_rev, uci->cpu_sig.rev);
 
-	return UCODE_OK;
+	return ret;
 }
 
 static int get_ucode_fw(void *to, const void *from, size_t n)
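
Both Intel apply paths now begin with the same early-out: read the resident revision and skip the expensive WBINVD-plus-MSR-write when a hyperthread sibling has already loaded the patch. A stand-alone sketch of that guard (revision numbers and return values invented):

#include <stdio.h>

static unsigned int resident_rev = 0xc6;	/* what the CPU reports */

static int apply(unsigned int new_rev)
{
	if (resident_rev >= new_rev)
		return 0;	/* like UCODE_OK: nothing to do, no MSR write */
	/* ...wbinvd + MSR 0x79 write would go here... */
	resident_rev = new_rev;
	return 1;	/* updated */
}

int main(void)
{
	printf("%d\n", apply(0xc6));	/* 0: sibling already updated it */
	printf("%d\n", apply(0xc8));	/* 1: genuinely newer */
	return 0;
}
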
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 2f723301eb58..38deafebb21b 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -23,7 +23,7 @@
 /*
  * this changes the io permissions bitmap in the current task.
  */
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
 {
 	struct thread_struct *t = &current->thread;
 	struct tss_struct *tss;
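
Switching sys_ioperm() to SYSCALL_DEFINE3() hands the argument list to the common syscall macros as (type, name) pairs instead of a hand-rolled asmlinkage prototype. A deliberately simplified stand-in showing the call shape; this is not the kernel's macro, which also emits metadata and argument-width handling:

#include <stdio.h>

#define SYSCALL_DEFINE3(name, t1, a1, t2, a2, t3, a3)	\
	long sys_##name(t1 a1, t2 a2, t3 a3)

SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
{
	printf("ioperm(%lu, %lu, %d)\n", from, num, turn_on);
	return 0;
}

int main(void)
{
	return (int)sys_ioperm(0x3f8, 8, 1);	/* invented example values */
}
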
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index bd36f3c33cd0..0715f827607c 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
 
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
+	bool is_in_entry_trampoline_section = false;
+
+#ifdef CONFIG_X86_64
+	is_in_entry_trampoline_section =
+		(addr >= (unsigned long)__entry_trampoline_start &&
+		 addr < (unsigned long)__entry_trampoline_end);
+#endif
 	return	(addr >= (unsigned long)__kprobes_text_start &&
 		 addr < (unsigned long)__kprobes_text_end) ||
 		(addr >= (unsigned long)__entry_text_start &&
-		 addr < (unsigned long)__entry_text_end);
+		 addr < (unsigned long)__entry_text_end) ||
+	       is_in_entry_trampoline_section;
 }
 
 int __init arch_init_kprobes(void)
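
The blacklist test is plain interval membership over a handful of [start, end) section ranges; the patch just adds the entry-trampoline range on 64-bit. A userspace sketch with invented addresses:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };	/* [start, end) */

static bool within_blacklist(unsigned long addr,
			     const struct range *r, int n)
{
	for (int i = 0; i < n; i++)
		if (addr >= r[i].start && addr < r[i].end)
			return true;
	return false;
}

int main(void)
{
	struct range blacklist[] = {
		{ 0x1000, 0x2000 },	/* e.g. kprobes text */
		{ 0x8000, 0x9000 },	/* e.g. entry trampoline */
	};

	printf("%d\n", within_blacklist(0x8800, blacklist, 2));	/* 1 */
	printf("%d\n", within_blacklist(0x4000, blacklist, 2));	/* 0 */
	return 0;
}
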
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index ac057f9b0763..0d930d8987cc 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -43,6 +43,13 @@ static inline void signal_compat_build_tests(void)
 	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
 #define CHECK_CSI_OFFSET(name)	  BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_code)  != 8);
+
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_signo) != 0);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_errno) != 4);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_code)  != 8);
 	/*
 	 * Ensure that the size of each si_field never changes.
 	 * If it does, it is a sign that the
@@ -63,36 +70,94 @@ static inline void signal_compat_build_tests(void)
 	CHECK_CSI_SIZE  (_kill, 2*sizeof(int));
 	CHECK_SI_SIZE   (_kill, 2*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0xC);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+
 	CHECK_CSI_OFFSET(_timer);
 	CHECK_CSI_SIZE  (_timer, 3*sizeof(int));
 	CHECK_SI_SIZE   (_timer, 6*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_tid) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_overrun) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
+
 	CHECK_CSI_OFFSET(_rt);
 	CHECK_CSI_SIZE  (_rt, 3*sizeof(int));
 	CHECK_SI_SIZE   (_rt, 4*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
+
 	CHECK_CSI_OFFSET(_sigchld);
 	CHECK_CSI_SIZE  (_sigchld, 5*sizeof(int));
 	CHECK_SI_SIZE   (_sigchld, 8*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x18);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_utime) != 0x20);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x28);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_status) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_utime) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_stime) != 0x1C);
+
 #ifdef CONFIG_X86_X32_ABI
 	CHECK_CSI_OFFSET(_sigchld_x32);
 	CHECK_CSI_SIZE  (_sigchld_x32, 7*sizeof(int));
 	/* no _sigchld_x32 in the generic siginfo_t */
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) != 0x20);
 #endif
 
 	CHECK_CSI_OFFSET(_sigfault);
 	CHECK_CSI_SIZE  (_sigfault, 4*sizeof(int));
 	CHECK_SI_SIZE   (_sigfault, 8*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x20);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x28);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_lower) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_upper) != 0x18);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
+
 	CHECK_CSI_OFFSET(_sigpoll);
 	CHECK_CSI_SIZE  (_sigpoll, 2*sizeof(int));
 	CHECK_SI_SIZE   (_sigpoll, 4*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_band) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_fd) != 0x10);
+
 	CHECK_CSI_OFFSET(_sigsys);
 	CHECK_CSI_SIZE  (_sigsys, 3*sizeof(int));
 	CHECK_SI_SIZE   (_sigsys, 4*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_syscall) != 0x18);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_arch) != 0x1C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_call_addr) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_syscall) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_arch) != 0x14);
+
 	/* any new si_fields should be added here */
 }
 
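
BUILD_BUG_ON() turns these offset checks into compile-time failures, freezing the signal ABI layout. The same trick is available in plain C11 via _Static_assert; a stand-alone sketch with an invented three-field struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sample_siginfo {
	int32_t si_signo;	/* offset 0 */
	int32_t si_errno;	/* offset 4 */
	int32_t si_code;	/* offset 8 */
};

_Static_assert(offsetof(struct sample_siginfo, si_signo) == 0, "ABI break");
_Static_assert(offsetof(struct sample_siginfo, si_errno) == 4, "ABI break");
_Static_assert(offsetof(struct sample_siginfo, si_code)  == 8, "ABI break");

int main(void)
{
	printf("layout pinned at compile time\n");
	return 0;
}
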
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 5edb27f1a2c4..9d0b5af7db91 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
 		return;
 
 check_vip:
-	if (VEFLAGS & X86_EFLAGS_VIP) {
+	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
+		(X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
 		save_v86_state(regs, VM86_STI);
 		return;
 	}
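
The vm86 fix matters because testing a single flag accepts VIP alone, while masking against both bits requires VIP and VIF to be set together before the pending-interrupt path runs. A tiny sketch of the difference (bit positions 20 and 19 match x86 EFLAGS, but the program itself is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define VIP (1u << 20)
#define VIF (1u << 19)

static bool old_check(unsigned f) { return f & VIP; }
static bool new_check(unsigned f)
{
	return (f & (VIP | VIF)) == (VIP | VIF);
}

int main(void)
{
	unsigned vip_only = VIP;

	printf("old: %d  new: %d\n", old_check(vip_only), new_check(vip_only));
	/* old: 1  new: 0 - the buggy path fired on VIP alone */
	return 0;
}
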
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9b138a06c1a4..b854ebf5851b 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -118,9 +118,11 @@ SECTIONS
 
 #ifdef CONFIG_X86_64
 	. = ALIGN(PAGE_SIZE);
+	VMLINUX_SYMBOL(__entry_trampoline_start) = .;
 	_entry_trampoline = .;
 	*(.entry_trampoline)
 	. = ALIGN(PAGE_SIZE);
+	VMLINUX_SYMBOL(__entry_trampoline_end) = .;
 	ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 #endif
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f551962ac294..763bb3bade63 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	else
 		pte_access &= ~ACC_WRITE_MASK;
 
+	if (!kvm_is_mmio_pfn(pfn))
+		spte |= shadow_me_mask;
+
 	spte |= (u64)pfn << PAGE_SHIFT;
-	spte |= shadow_me_mask;
 
 	if (pte_access & ACC_WRITE_MASK) {
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 051dab74e4e9..2d87603f9179 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1045,6 +1045,13 @@ static inline bool is_machine_check(u32 intr_info)
 		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
 }
 
+/* Undocumented: icebp/int1 */
+static inline bool is_icebp(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+		== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
 static inline bool cpu_has_vmx_msr_bitmap(void)
 {
 	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
@@ -6179,7 +6186,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
 			vcpu->arch.dr6 &= ~15;
 			vcpu->arch.dr6 |= dr6 | DR6_RTM;
-			if (!(dr6 & ~DR6_RESERVED)) /* icebp */
+			if (is_icebp(intr_info))
 				skip_emulated_instruction(vcpu);
 
 			kvm_queue_exception(vcpu, DB_VECTOR);
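
is_icebp() masks the VMCS interruption-info field down to its type and valid bits and compares against the undocumented privileged-software-exception encoding (type 5). A stand-alone sketch of the same decode, mirroring the constants from the vmx.h hunk above (the test vectors are invented):

#include <stdbool.h>
#include <stdio.h>

#define INTR_INFO_VALID_MASK		(1u << 31)
#define INTR_INFO_INTR_TYPE_MASK	(7u << 8)
#define INTR_TYPE_PRIV_SW_EXCEPTION	(5u << 8)

static bool is_icebp(unsigned int intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
}

int main(void)
{
	unsigned int icebp = INTR_INFO_VALID_MASK | (5u << 8) | 1;	/* vector 1 */
	unsigned int swexc = INTR_INFO_VALID_MASK | (6u << 8) | 3;

	printf("%d %d\n", is_icebp(icebp), is_icebp(swexc));	/* 1 0 */
	return 0;
}
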
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c88573d90f3e..25a30b5d6582 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (!pmd_k)
 		return -1;
 
-	if (pmd_huge(*pmd_k))
+	if (pmd_large(*pmd_k))
 		return 0;
 
 	pte_k = pte_offset_kernel(pmd_k, address);
@@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
 		BUG();
 
-	if (pud_huge(*pud))
+	if (pud_large(*pud))
 		return 0;
 
 	pmd = pmd_offset(pud, address);
@@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
 		BUG();
 
-	if (pmd_huge(*pmd))
+	if (pmd_large(*pmd))
 		return 0;
 
 	pte_ref = pte_offset_kernel(pmd_ref, address);
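
The fault-path switch from pmd_huge()/pud_huge() to pmd_large()/pud_large() is about asking the right question: the walker only needs to know whether the entry itself maps a large page, which on x86 comes down to the PSE bit, without hugetlbfs-specific logic. A simplified stand-in (real predicates also consider the present bit; values invented):

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_PSE	(1ul << 7)	/* x86 "page size" bit */

static bool pmd_large(unsigned long pmd_val)
{
	return pmd_val & _PAGE_PSE;
}

int main(void)
{
	unsigned long huge_entry  = 0x200000 | _PAGE_PSE | 0x1;	/* present */
	unsigned long table_entry = 0x345000 | 0x1;

	printf("%d %d\n", pmd_large(huge_entry), pmd_large(table_entry)); /* 1 0 */
	return 0;
}
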
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 8b72923f1d35..af11a2890235 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -800,17 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 
 #define PAGE_INUSE 0xFD
 
-static void __meminit free_pagetable(struct page *page, int order,
-				     struct vmem_altmap *altmap)
+static void __meminit free_pagetable(struct page *page, int order)
 {
 	unsigned long magic;
 	unsigned int nr_pages = 1 << order;
 
-	if (altmap) {
-		vmem_altmap_free(altmap, nr_pages);
-		return;
-	}
-
 	/* bootmem page has reserved flag */
 	if (PageReserved(page)) {
 		__ClearPageReserved(page);
@@ -826,9 +820,17 @@ static void __meminit free_pagetable(struct page *page, int order,
 	free_pages((unsigned long)page_address(page), order);
 }
 
-static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
-				     struct vmem_altmap *altmap)
+static void __meminit free_hugepage_table(struct page *page,
+					  struct vmem_altmap *altmap)
 {
+	if (altmap)
+		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
+	else
+		free_pagetable(page, get_order(PMD_SIZE));
+}
+
+static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
+{
 	pte_t *pte;
 	int i;
 
@@ -839,14 +841,13 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
 	}
 
 	/* free a pte talbe */
-	free_pagetable(pmd_page(*pmd), 0, altmap);
+	free_pagetable(pmd_page(*pmd), 0);
 	spin_lock(&init_mm.page_table_lock);
 	pmd_clear(pmd);
 	spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
-				     struct vmem_altmap *altmap)
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 {
 	pmd_t *pmd;
 	int i;
@@ -858,14 +859,13 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
 	}
 
 	/* free a pmd talbe */
-	free_pagetable(pud_page(*pud), 0, altmap);
+	free_pagetable(pud_page(*pud), 0);
 	spin_lock(&init_mm.page_table_lock);
 	pud_clear(pud);
 	spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
-				     struct vmem_altmap *altmap)
+static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
 {
 	pud_t *pud;
 	int i;
@@ -877,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
 	}
 
 	/* free a pud talbe */
-	free_pagetable(p4d_page(*p4d), 0, altmap);
+	free_pagetable(p4d_page(*p4d), 0);
 	spin_lock(&init_mm.page_table_lock);
 	p4d_clear(p4d);
 	spin_unlock(&init_mm.page_table_lock);
@@ -885,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
885 885
886static void __meminit 886static void __meminit
887remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, 887remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
888 struct vmem_altmap *altmap, bool direct) 888 bool direct)
889{ 889{
890 unsigned long next, pages = 0; 890 unsigned long next, pages = 0;
891 pte_t *pte; 891 pte_t *pte;
@@ -916,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 916 * freed when offlining, or simply not in use. 916 * freed when offlining, or simply not in use.
 917 */ 917 */
918 if (!direct) 918 if (!direct)
919 free_pagetable(pte_page(*pte), 0, altmap); 919 free_pagetable(pte_page(*pte), 0);
920 920
921 spin_lock(&init_mm.page_table_lock); 921 spin_lock(&init_mm.page_table_lock);
922 pte_clear(&init_mm, addr, pte); 922 pte_clear(&init_mm, addr, pte);
@@ -939,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
939 939
940 page_addr = page_address(pte_page(*pte)); 940 page_addr = page_address(pte_page(*pte));
941 if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) { 941 if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
942 free_pagetable(pte_page(*pte), 0, altmap); 942 free_pagetable(pte_page(*pte), 0);
943 943
944 spin_lock(&init_mm.page_table_lock); 944 spin_lock(&init_mm.page_table_lock);
945 pte_clear(&init_mm, addr, pte); 945 pte_clear(&init_mm, addr, pte);
@@ -974,9 +974,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
974 if (IS_ALIGNED(addr, PMD_SIZE) && 974 if (IS_ALIGNED(addr, PMD_SIZE) &&
975 IS_ALIGNED(next, PMD_SIZE)) { 975 IS_ALIGNED(next, PMD_SIZE)) {
976 if (!direct) 976 if (!direct)
977 free_pagetable(pmd_page(*pmd), 977 free_hugepage_table(pmd_page(*pmd),
978 get_order(PMD_SIZE), 978 altmap);
979 altmap);
980 979
981 spin_lock(&init_mm.page_table_lock); 980 spin_lock(&init_mm.page_table_lock);
982 pmd_clear(pmd); 981 pmd_clear(pmd);
@@ -989,9 +988,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
989 page_addr = page_address(pmd_page(*pmd)); 988 page_addr = page_address(pmd_page(*pmd));
990 if (!memchr_inv(page_addr, PAGE_INUSE, 989 if (!memchr_inv(page_addr, PAGE_INUSE,
991 PMD_SIZE)) { 990 PMD_SIZE)) {
992 free_pagetable(pmd_page(*pmd), 991 free_hugepage_table(pmd_page(*pmd),
993 get_order(PMD_SIZE), 992 altmap);
994 altmap);
995 993
996 spin_lock(&init_mm.page_table_lock); 994 spin_lock(&init_mm.page_table_lock);
997 pmd_clear(pmd); 995 pmd_clear(pmd);
@@ -1003,8 +1001,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
1003 } 1001 }
1004 1002
1005 pte_base = (pte_t *)pmd_page_vaddr(*pmd); 1003 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
1006 remove_pte_table(pte_base, addr, next, altmap, direct); 1004 remove_pte_table(pte_base, addr, next, direct);
1007 free_pte_table(pte_base, pmd, altmap); 1005 free_pte_table(pte_base, pmd);
1008 } 1006 }
1009 1007
1010 /* Call free_pmd_table() in remove_pud_table(). */ 1008 /* Call free_pmd_table() in remove_pud_table(). */
@@ -1033,8 +1031,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
1033 IS_ALIGNED(next, PUD_SIZE)) { 1031 IS_ALIGNED(next, PUD_SIZE)) {
1034 if (!direct) 1032 if (!direct)
1035 free_pagetable(pud_page(*pud), 1033 free_pagetable(pud_page(*pud),
1036 get_order(PUD_SIZE), 1034 get_order(PUD_SIZE));
1037 altmap);
1038 1035
1039 spin_lock(&init_mm.page_table_lock); 1036 spin_lock(&init_mm.page_table_lock);
1040 pud_clear(pud); 1037 pud_clear(pud);
@@ -1048,8 +1045,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
1048 if (!memchr_inv(page_addr, PAGE_INUSE, 1045 if (!memchr_inv(page_addr, PAGE_INUSE,
1049 PUD_SIZE)) { 1046 PUD_SIZE)) {
1050 free_pagetable(pud_page(*pud), 1047 free_pagetable(pud_page(*pud),
1051 get_order(PUD_SIZE), 1048 get_order(PUD_SIZE));
1052 altmap);
1053 1049
1054 spin_lock(&init_mm.page_table_lock); 1050 spin_lock(&init_mm.page_table_lock);
1055 pud_clear(pud); 1051 pud_clear(pud);
@@ -1062,7 +1058,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
1062 1058
1063 pmd_base = pmd_offset(pud, 0); 1059 pmd_base = pmd_offset(pud, 0);
1064 remove_pmd_table(pmd_base, addr, next, direct, altmap); 1060 remove_pmd_table(pmd_base, addr, next, direct, altmap);
1065 free_pmd_table(pmd_base, pud, altmap); 1061 free_pmd_table(pmd_base, pud);
1066 } 1062 }
1067 1063
1068 if (direct) 1064 if (direct)
@@ -1094,7 +1090,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
1094 * to adapt for boot-time switching between 4 and 5 level page tables. 1090 * to adapt for boot-time switching between 4 and 5 level page tables.
1095 */ 1091 */
1096 if (CONFIG_PGTABLE_LEVELS == 5) 1092 if (CONFIG_PGTABLE_LEVELS == 5)
1097 free_pud_table(pud_base, p4d, altmap); 1093 free_pud_table(pud_base, p4d);
1098 } 1094 }
1099 1095
1100 if (direct) 1096 if (direct)
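
The init_64.c hunks above move the vmem_altmap special case out of free_pagetable() and into a new free_hugepage_table() helper: only PMD-sized vmemmap mappings can be altmap-backed, while plain page-table pages always return to the page allocator. A minimal userspace sketch of that dispatch; all names below are simplified stand-ins for the kernel helpers, not the real implementations:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PMD_SIZE   (1UL << 21)               /* 2 MiB huge page */

struct vmem_altmap { unsigned long freed; }; /* toy stand-in */

static void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pages)
{
        altmap->freed += nr_pages;           /* hand pages back to the altmap pool */
        printf("altmap: reclaimed %lu pages\n", nr_pages);
}

static void free_pagetable(void *page, int order)
{
        /* ordinary allocator path; no altmap special case left here */
        printf("buddy: freed order-%d page at %p\n", order, page);
}

static void free_hugepage_table(void *page, struct vmem_altmap *altmap)
{
        if (altmap)
                vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
        else
                free_pagetable(page, 9);     /* get_order(PMD_SIZE) == 9 for 2M/4K */
}

int main(void)
{
        struct vmem_altmap altmap = { 0 };
        char dummy;

        free_hugepage_table(&dummy, NULL);    /* regular memory: buddy path */
        free_hugepage_table(&dummy, &altmap); /* device memory: altmap path */
        return 0;
}
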
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 004abf9ebf12..34cda7e0551b 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -702,4 +702,52 @@ int pmd_clear_huge(pmd_t *pmd)
702 702
703 return 0; 703 return 0;
704} 704}
705
706/**
707 * pud_free_pmd_page - Clear pud entry and free pmd page.
708 * @pud: Pointer to a PUD.
709 *
 710 * Context: The pud range has been unmapped and TLB purged.
711 * Return: 1 if clearing the entry succeeded. 0 otherwise.
712 */
713int pud_free_pmd_page(pud_t *pud)
714{
715 pmd_t *pmd;
716 int i;
717
718 if (pud_none(*pud))
719 return 1;
720
721 pmd = (pmd_t *)pud_page_vaddr(*pud);
722
723 for (i = 0; i < PTRS_PER_PMD; i++)
724 if (!pmd_free_pte_page(&pmd[i]))
725 return 0;
726
727 pud_clear(pud);
728 free_page((unsigned long)pmd);
729
730 return 1;
731}
732
733/**
734 * pmd_free_pte_page - Clear pmd entry and free pte page.
735 * @pmd: Pointer to a PMD.
736 *
 737 * Context: The pmd range has been unmapped and TLB purged.
738 * Return: 1 if clearing the entry succeeded. 0 otherwise.
739 */
740int pmd_free_pte_page(pmd_t *pmd)
741{
742 pte_t *pte;
743
744 if (pmd_none(*pmd))
745 return 1;
746
747 pte = (pte_t *)pmd_page_vaddr(*pmd);
748 pmd_clear(pmd);
749 free_page((unsigned long)pte);
750
751 return 1;
752}
705#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 753#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
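
The new pud_free_pmd_page()/pmd_free_pte_page() helpers enforce a bottom-up teardown: every lower-level table must be freed before the upper-level entry is cleared. A toy userspace model of that ordering, assuming simplified one-field entry types rather than the real pud_t/pmd_t:

#include <stdio.h>
#include <stdlib.h>

#define PTRS_PER_PMD 512

typedef struct { void *pte_page; } pmd_t;  /* toy: entry points at a pte table */
typedef struct { pmd_t *pmd_page; } pud_t; /* toy: entry points at a pmd table */

static int pmd_free_pte_page(pmd_t *pmd)
{
        if (!pmd->pte_page)                /* pmd_none(): nothing to do */
                return 1;
        free(pmd->pte_page);               /* free the pte page ... */
        pmd->pte_page = NULL;              /* ... then pmd_clear() */
        return 1;
}

static int pud_free_pmd_page(pud_t *pud)
{
        int i;

        if (!pud->pmd_page)                /* pud_none() */
                return 1;
        for (i = 0; i < PTRS_PER_PMD; i++)
                if (!pmd_free_pte_page(&pud->pmd_page[i]))
                        return 0;          /* a lower table survived: keep pud */
        free(pud->pmd_page);               /* all gone: free the pmd page */
        pud->pmd_page = NULL;              /* pud_clear() */
        return 1;
}

int main(void)
{
        pud_t pud = { calloc(PTRS_PER_PMD, sizeof(pmd_t)) };

        pud.pmd_page[0].pte_page = malloc(4096);  /* one populated entry */
        printf("cleared: %d\n", pud_free_pmd_page(&pud));
        return 0;
}
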
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index ce38f165489b..631507f0c198 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -332,7 +332,7 @@ static void __init pti_clone_user_shared(void)
332} 332}
333 333
334/* 334/*
335 * Clone the ESPFIX P4D into the user space visinble page table 335 * Clone the ESPFIX P4D into the user space visible page table
336 */ 336 */
337static void __init pti_setup_espfix64(void) 337static void __init pti_setup_espfix64(void)
338{ 338{
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index eb661fff94d7..b725154182cc 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1223,7 +1223,7 @@ skip_init_addrs:
1223 * may converge on the last pass. In such case do one more 1223 * may converge on the last pass. In such case do one more
1224 * pass to emit the final image 1224 * pass to emit the final image
1225 */ 1225 */
1226 for (pass = 0; pass < 10 || image; pass++) { 1226 for (pass = 0; pass < 20 || image; pass++) {
1227 proglen = do_jit(prog, addrs, image, oldproglen, &ctx); 1227 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1228 if (proglen <= 0) { 1228 if (proglen <= 0) {
1229 image = NULL; 1229 image = NULL;
@@ -1250,6 +1250,7 @@ skip_init_addrs:
1250 } 1250 }
1251 } 1251 }
1252 oldproglen = proglen; 1252 oldproglen = proglen;
1253 cond_resched();
1253 } 1254 }
1254 1255
1255 if (bpf_jit_enable > 1) 1256 if (bpf_jit_enable > 1)
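
The JIT change raises the convergence cap from 10 to 20 passes and adds cond_resched() so long programs don't monopolize the CPU between passes. The loop shape, modeled in plain C with a made-up jit_pass() standing in for do_jit() and yield_cpu() for cond_resched():

#include <stdio.h>

static int jit_pass(int oldlen)
{
        /* toy codegen: each pass moves the image size toward a fixed point */
        return oldlen > 100 ? oldlen - oldlen / 4 : 100;
}

static void yield_cpu(void)
{
        /* stands in for cond_resched(): give other tasks a turn per pass */
}

int main(void)
{
        int oldlen = 1000, len = 0, pass;

        for (pass = 0; pass < 20; pass++) {    /* cap raised from 10 to 20 */
                len = jit_pass(oldlen);
                if (len == oldlen)             /* sizes converged: emit image */
                        break;
                oldlen = len;
                yield_cpu();
        }
        printf("converged at %d bytes after %d passes\n", len, pass);
        return 0;
}
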
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 11b113f8e367..ebb626ffb5fa 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void)
74 res.start = gas->address; 74 res.start = gas->address;
75 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 75 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
76 res.flags = IORESOURCE_MEM; 76 res.flags = IORESOURCE_MEM;
77 res.end = res.start + ALIGN(gas->access_width, 4); 77 res.end = res.start + ALIGN(gas->access_width, 4) - 1;
78 } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 78 } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
79 res.flags = IORESOURCE_IO; 79 res.flags = IORESOURCE_IO;
80 res.end = res.start + gas->access_width; 80 res.end = res.start + gas->access_width - 1;
81 } else { 81 } else {
82 pr_warn("Unsupported address space: %u\n", 82 pr_warn("Unsupported address space: %u\n",
83 gas->space_id); 83 gas->space_id);
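
The watchdog fix is an inclusive-end off-by-one: struct resource's .end is the address of the last valid byte, so a window of len bytes starting at start ends at start + len - 1, not start + len. A quick standalone check; the ALIGN macro mirrors the kernel's round-up semantics, and the base address and width are made up:

#include <stdio.h>

/* round x up to a multiple of a (a power of two), like the kernel macro */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long start = 0xfed40000;  /* made-up MMIO base */
        unsigned long width = 4;           /* made-up access width in bytes */
        unsigned long end = start + ALIGN(width, 4) - 1;

        /* inclusive end: 0xfed40000..0xfed40003 is exactly 4 bytes */
        printf("end=0x%lx size=%lu\n", end, end - start + 1);
        return 0;
}
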
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7128488a3a72..f2eb6c37ea0a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -70,7 +70,6 @@ static async_cookie_t async_cookie;
70static bool battery_driver_registered; 70static bool battery_driver_registered;
71static int battery_bix_broken_package; 71static int battery_bix_broken_package;
72static int battery_notification_delay_ms; 72static int battery_notification_delay_ms;
73static int battery_full_discharging;
74static unsigned int cache_time = 1000; 73static unsigned int cache_time = 1000;
75module_param(cache_time, uint, 0644); 74module_param(cache_time, uint, 0644);
76MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 75MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -215,12 +214,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
215 return -ENODEV; 214 return -ENODEV;
216 switch (psp) { 215 switch (psp) {
217 case POWER_SUPPLY_PROP_STATUS: 216 case POWER_SUPPLY_PROP_STATUS:
218 if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) { 217 if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
219 if (battery_full_discharging && battery->rate_now == 0) 218 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
220 val->intval = POWER_SUPPLY_STATUS_FULL; 219 else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
221 else
222 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
223 } else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
224 val->intval = POWER_SUPPLY_STATUS_CHARGING; 220 val->intval = POWER_SUPPLY_STATUS_CHARGING;
225 else if (acpi_battery_is_charged(battery)) 221 else if (acpi_battery_is_charged(battery))
226 val->intval = POWER_SUPPLY_STATUS_FULL; 222 val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1170,12 +1166,6 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
1170 return 0; 1166 return 0;
1171} 1167}
1172 1168
1173static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
1174{
1175 battery_full_discharging = 1;
1176 return 0;
1177}
1178
1179static const struct dmi_system_id bat_dmi_table[] __initconst = { 1169static const struct dmi_system_id bat_dmi_table[] __initconst = {
1180 { 1170 {
1181 .callback = battery_bix_broken_package_quirk, 1171 .callback = battery_bix_broken_package_quirk,
@@ -1193,38 +1183,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
1193 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"), 1183 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
1194 }, 1184 },
1195 }, 1185 },
1196 {
1197 .callback = battery_full_discharging_quirk,
1198 .ident = "ASUS GL502VSK",
1199 .matches = {
1200 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1201 DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
1202 },
1203 },
1204 {
1205 .callback = battery_full_discharging_quirk,
1206 .ident = "ASUS UX305LA",
1207 .matches = {
1208 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1209 DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
1210 },
1211 },
1212 {
1213 .callback = battery_full_discharging_quirk,
1214 .ident = "ASUS UX360UA",
1215 .matches = {
1216 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1217 DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"),
1218 },
1219 },
1220 {
1221 .callback = battery_full_discharging_quirk,
1222 .ident = "ASUS UX410UAK",
1223 .matches = {
1224 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
1225 DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"),
1226 },
1227 },
1228 {}, 1186 {},
1229}; 1187};
1230 1188
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index bbe48ad20886..eb09ef55c38a 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2675,10 +2675,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2675 else 2675 else
2676 ndr_desc->numa_node = NUMA_NO_NODE; 2676 ndr_desc->numa_node = NUMA_NO_NODE;
2677 2677
2678 if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) 2678 /*
2679 * Persistence domain bits are hierarchical, if
2680 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
2681 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
2682 */
2683 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
2679 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); 2684 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
2680 2685 else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
2681 if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
2682 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); 2686 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
2683 2687
2684 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 2688 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 8ccaae3550d2..85167603b9c9 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
103 */ 103 */
104int acpi_map_pxm_to_online_node(int pxm) 104int acpi_map_pxm_to_online_node(int pxm)
105{ 105{
106 int node, n, dist, min_dist; 106 int node, min_node;
107 107
108 node = acpi_map_pxm_to_node(pxm); 108 node = acpi_map_pxm_to_node(pxm);
109 109
110 if (node == NUMA_NO_NODE) 110 if (node == NUMA_NO_NODE)
111 node = 0; 111 node = 0;
112 112
113 min_node = node;
113 if (!node_online(node)) { 114 if (!node_online(node)) {
114 min_dist = INT_MAX; 115 int min_dist = INT_MAX, dist, n;
116
115 for_each_online_node(n) { 117 for_each_online_node(n) {
116 dist = node_distance(node, n); 118 dist = node_distance(node, n);
117 if (dist < min_dist) { 119 if (dist < min_dist) {
118 min_dist = dist; 120 min_dist = dist;
119 node = n; 121 min_node = n;
120 } 122 }
121 } 123 }
122 } 124 }
123 125
124 return node; 126 return min_node;
125} 127}
126EXPORT_SYMBOL(acpi_map_pxm_to_online_node); 128EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
127 129
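
The acpi_map_pxm_to_online_node() fix keeps the original node fixed while searching: the old code overwrote `node` inside the loop, so later distance lookups were measured from the wrong node. A self-contained rendition with an invented 4-node distance table and online mask:

#include <limits.h>
#include <stdio.h>

#define NR_NODES 4

static const int distance[NR_NODES][NR_NODES] = {
        { 10, 20, 40, 30 },
        { 20, 10, 20, 40 },
        { 40, 20, 10, 20 },
        { 30, 40, 20, 10 },
};
static const int online[NR_NODES] = { 0, 1, 1, 0 };  /* node 0 offline */

static int nearest_online_node(int node)
{
        int min_node = node;               /* running best lives here ... */

        if (!online[node]) {
                int min_dist = INT_MAX, n;

                for (n = 0; n < NR_NODES; n++) {
                        if (!online[n])
                                continue;
                        /* ... so `node` stays the fixed reference point */
                        if (distance[node][n] < min_dist) {
                                min_dist = distance[node][n];
                                min_node = n;
                        }
                }
        }
        return min_node;
}

int main(void)
{
        printf("nearest online node to 0: %d\n", nearest_online_node(0));
        return 0;
}
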
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 355a95a83a34..1ff17799769d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -550,7 +550,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
550 .driver_data = board_ahci_yes_fbs }, 550 .driver_data = board_ahci_yes_fbs },
551 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), 551 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
552 .driver_data = board_ahci_yes_fbs }, 552 .driver_data = board_ahci_yes_fbs },
553 { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), 553 { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
554 .driver_data = board_ahci_yes_fbs },
555 { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
554 .driver_data = board_ahci_yes_fbs }, 556 .driver_data = board_ahci_yes_fbs },
555 557
556 /* Promise */ 558 /* Promise */
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index a0de7a38430c..7adcf3caabd0 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -665,6 +665,16 @@ int ahci_stop_engine(struct ata_port *ap)
665 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) 665 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
666 return 0; 666 return 0;
667 667
668 /*
669 * Don't try to issue commands but return with ENODEV if the
670 * AHCI controller not available anymore (e.g. due to PCIe hot
671 * unplugging). Otherwise a 500ms delay for each port is added.
672 */
673 if (tmp == 0xffffffff) {
674 dev_err(ap->host->dev, "AHCI controller unavailable!\n");
675 return -ENODEV;
676 }
677
668 /* setting HBA to idle */ 678 /* setting HBA to idle */
669 tmp &= ~PORT_CMD_START; 679 tmp &= ~PORT_CMD_START;
670 writel(tmp, port_mmio + PORT_CMD); 680 writel(tmp, port_mmio + PORT_CMD);
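
The ahci_stop_engine() change relies on a PCIe property: MMIO reads from a surprise-removed device return all-ones, so 0xffffffff in PORT_CMD means the controller is gone and polling it would only burn the 500 ms timeout per port. A toy model of that early-out, with read_port_cmd() standing in for the readl():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t read_port_cmd(int device_present)
{
        return device_present ? 0x0001u : 0xffffffffu;  /* all-ones when gone */
}

static int stop_engine(int device_present)
{
        uint32_t tmp = read_port_cmd(device_present);

        if (tmp == 0xffffffffu)            /* controller vanished: bail out */
                return -ENODEV;
        /* otherwise clear PORT_CMD_START and wait for the engine to idle */
        return 0;
}

int main(void)
{
        printf("present: %d, unplugged: %d\n", stop_engine(1), stop_engine(0));
        return 0;
}
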
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 341d0ef82cbd..30cc8f1a31e1 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -340,7 +340,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
340 * 2) regulator for controlling the targets power (optional) 340 * 2) regulator for controlling the targets power (optional)
341 * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node, 341 * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
342 * or for non devicetree enabled platforms a single clock 342 * or for non devicetree enabled platforms a single clock
343 * 4) phys (optional) 343 * 4) phys (optional)
344 * 344 *
345 * RETURNS: 345 * RETURNS:
346 * The allocated ahci_host_priv on success, otherwise an ERR_PTR value 346 * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3c09122bf038..7431ccd03316 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4530,6 +4530,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4530 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4530 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4531 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4531 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4532 4532
4533 /* Crucial BX100 SSD 500GB has broken LPM support */
4534 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
4535
4536 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4537 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4538 ATA_HORKAGE_ZERO_AFTER_TRIM |
4539 ATA_HORKAGE_NOLPM, },
4540 /* 512GB MX100 with newer firmware has only LPM issues */
4541 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
4542 ATA_HORKAGE_NOLPM, },
4543
4544 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4545 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4546 ATA_HORKAGE_ZERO_AFTER_TRIM |
4547 ATA_HORKAGE_NOLPM, },
4548 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4549 ATA_HORKAGE_ZERO_AFTER_TRIM |
4550 ATA_HORKAGE_NOLPM, },
4551
4533 /* devices that don't properly handle queued TRIM commands */ 4552 /* devices that don't properly handle queued TRIM commands */
4534 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4553 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4535 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4554 ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4541,7 +4560,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4541 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4560 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4542 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4561 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4543 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4562 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4544 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4563 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4564 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4565 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4545 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4566 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4546 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4567 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4547 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4568 ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -5401,8 +5422,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5401 * We guarantee to LLDs that they will have at least one 5422 * We guarantee to LLDs that they will have at least one
5402 * non-zero sg if the command is a data command. 5423 * non-zero sg if the command is a data command.
5403 */ 5424 */
5404 if (WARN_ON_ONCE(ata_is_data(prot) && 5425 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5405 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5406 goto sys_err; 5426 goto sys_err;
5407 5427
5408 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5428 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
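
The blacklist entries are shell-style globs, so the old "Samsung SSD 8*" pattern also caught newer drives without the queued-TRIM bug, which is why it is split into the 840 and 850 families above. fnmatch(3) has matching semantics close enough to show the difference; the model strings are made up:

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
        const char *models[] = { "Samsung SSD 840 EVO 250GB",
                                 "Samsung SSD 860 EVO 500GB" };
        int i;

        for (i = 0; i < 2; i++)
                printf("%-28s  8*: %d  840*: %d\n", models[i],
                       fnmatch("Samsung SSD 8*", models[i], 0) == 0,
                       fnmatch("Samsung SSD 840*", models[i], 0) == 0);
        return 0;
}
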
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 11c3137d7b0a..c016829a38fd 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -815,7 +815,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
815 815
816 if (ap->pflags & ATA_PFLAG_LOADING) 816 if (ap->pflags & ATA_PFLAG_LOADING)
817 ap->pflags &= ~ATA_PFLAG_LOADING; 817 ap->pflags &= ~ATA_PFLAG_LOADING;
818 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 818 else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
819 !(ap->flags & ATA_FLAG_SAS_HOST))
819 schedule_delayed_work(&ap->hotplug_task, 0); 820 schedule_delayed_work(&ap->hotplug_task, 0);
820 821
821 if (ap->pflags & ATA_PFLAG_RECOVERED) 822 if (ap->pflags & ATA_PFLAG_RECOVERED)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 66be961c93a4..89a9d4a2efc8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3316,6 +3316,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
3316 goto invalid_fld; 3316 goto invalid_fld;
3317 } 3317 }
3318 3318
3319 /* We may not issue NCQ commands to devices not supporting NCQ */
3320 if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) {
3321 fp = 1;
3322 goto invalid_fld;
3323 }
3324
3319 /* sanity check for pio multi commands */ 3325 /* sanity check for pio multi commands */
3320 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { 3326 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
3321 fp = 1; 3327 fp = 1;
@@ -4282,7 +4288,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
4282#ifdef ATA_DEBUG 4288#ifdef ATA_DEBUG
4283 struct scsi_device *scsidev = cmd->device; 4289 struct scsi_device *scsidev = cmd->device;
4284 4290
4285 DPRINTK("CDB (%u:%d,%d,%d) %9ph\n", 4291 DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
4286 ap->print_id, 4292 ap->print_id,
4287 scsidev->channel, scsidev->id, scsidev->lun, 4293 scsidev->channel, scsidev->id, scsidev->lun,
4288 cmd->cmnd); 4294 cmd->cmnd);
@@ -4309,7 +4315,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
4309 if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { 4315 if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
4310 /* relay SCSI command to ATAPI device */ 4316 /* relay SCSI command to ATAPI device */
4311 int len = COMMAND_SIZE(scsi_op); 4317 int len = COMMAND_SIZE(scsi_op);
4312 if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) 4318 if (unlikely(len > scmd->cmd_len ||
4319 len > dev->cdb_len ||
4320 scmd->cmd_len > ATAPI_CDB_LEN))
4313 goto bad_cdb_len; 4321 goto bad_cdb_len;
4314 4322
4315 xlat_func = atapi_xlat; 4323 xlat_func = atapi_xlat;
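
The ATAPI relay fix adds a third bound: besides fitting both the incoming command and the device's CDB length, the caller's CDB may not exceed ATAPI_CDB_LEN (16 bytes). The combined check, reduced to a standalone predicate:

#include <stdio.h>

#define ATAPI_CDB_LEN 16

static int cdb_len_ok(int len, int cmd_len, int dev_cdb_len)
{
        /* reject if the opcode's length exceeds either limit, or if the
         * caller handed us more CDB bytes than ATAPI can carry at all */
        return !(len > cmd_len || len > dev_cdb_len ||
                 cmd_len > ATAPI_CDB_LEN);
}

int main(void)
{
        printf("normal: %d  oversized: %d\n",
               cdb_len_ok(12, 12, 16),    /* typical 12-byte packet: ok */
               cdb_len_ok(12, 32, 16));   /* 32-byte CDB: rejected now */
        return 0;
}
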
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 80ee2f2a50d0..6456e07db72a 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -146,6 +146,7 @@
146enum sata_rcar_type { 146enum sata_rcar_type {
147 RCAR_GEN1_SATA, 147 RCAR_GEN1_SATA,
148 RCAR_GEN2_SATA, 148 RCAR_GEN2_SATA,
149 RCAR_GEN3_SATA,
149 RCAR_R8A7790_ES1_SATA, 150 RCAR_R8A7790_ES1_SATA,
150}; 151};
151 152
@@ -784,26 +785,11 @@ static void sata_rcar_setup_port(struct ata_host *host)
784 ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2); 785 ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
785} 786}
786 787
787static void sata_rcar_init_controller(struct ata_host *host) 788static void sata_rcar_init_module(struct sata_rcar_priv *priv)
788{ 789{
789 struct sata_rcar_priv *priv = host->private_data;
790 void __iomem *base = priv->base; 790 void __iomem *base = priv->base;
791 u32 val; 791 u32 val;
792 792
793 /* reset and setup phy */
794 switch (priv->type) {
795 case RCAR_GEN1_SATA:
796 sata_rcar_gen1_phy_init(priv);
797 break;
798 case RCAR_GEN2_SATA:
799 case RCAR_R8A7790_ES1_SATA:
800 sata_rcar_gen2_phy_init(priv);
801 break;
802 default:
803 dev_warn(host->dev, "SATA phy is not initialized\n");
804 break;
805 }
806
807 /* SATA-IP reset state */ 793 /* SATA-IP reset state */
808 val = ioread32(base + ATAPI_CONTROL1_REG); 794 val = ioread32(base + ATAPI_CONTROL1_REG);
809 val |= ATAPI_CONTROL1_RESET; 795 val |= ATAPI_CONTROL1_RESET;
@@ -824,10 +810,33 @@ static void sata_rcar_init_controller(struct ata_host *host)
824 /* ack and mask */ 810 /* ack and mask */
825 iowrite32(0, base + SATAINTSTAT_REG); 811 iowrite32(0, base + SATAINTSTAT_REG);
826 iowrite32(0x7ff, base + SATAINTMASK_REG); 812 iowrite32(0x7ff, base + SATAINTMASK_REG);
813
827 /* enable interrupts */ 814 /* enable interrupts */
828 iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG); 815 iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
829} 816}
830 817
818static void sata_rcar_init_controller(struct ata_host *host)
819{
820 struct sata_rcar_priv *priv = host->private_data;
821
822 /* reset and setup phy */
823 switch (priv->type) {
824 case RCAR_GEN1_SATA:
825 sata_rcar_gen1_phy_init(priv);
826 break;
827 case RCAR_GEN2_SATA:
828 case RCAR_GEN3_SATA:
829 case RCAR_R8A7790_ES1_SATA:
830 sata_rcar_gen2_phy_init(priv);
831 break;
832 default:
833 dev_warn(host->dev, "SATA phy is not initialized\n");
834 break;
835 }
836
837 sata_rcar_init_module(priv);
838}
839
831static const struct of_device_id sata_rcar_match[] = { 840static const struct of_device_id sata_rcar_match[] = {
832 { 841 {
833 /* Deprecated by "renesas,sata-r8a7779" */ 842 /* Deprecated by "renesas,sata-r8a7779" */
@@ -856,7 +865,7 @@ static const struct of_device_id sata_rcar_match[] = {
856 }, 865 },
857 { 866 {
858 .compatible = "renesas,sata-r8a7795", 867 .compatible = "renesas,sata-r8a7795",
859 .data = (void *)RCAR_GEN2_SATA 868 .data = (void *)RCAR_GEN3_SATA
860 }, 869 },
861 { 870 {
862 .compatible = "renesas,rcar-gen2-sata", 871 .compatible = "renesas,rcar-gen2-sata",
@@ -864,7 +873,7 @@ static const struct of_device_id sata_rcar_match[] = {
864 }, 873 },
865 { 874 {
866 .compatible = "renesas,rcar-gen3-sata", 875 .compatible = "renesas,rcar-gen3-sata",
867 .data = (void *)RCAR_GEN2_SATA 876 .data = (void *)RCAR_GEN3_SATA
868 }, 877 },
869 { }, 878 { },
870}; 879};
@@ -982,11 +991,18 @@ static int sata_rcar_resume(struct device *dev)
982 if (ret) 991 if (ret)
983 return ret; 992 return ret;
984 993
985 /* ack and mask */ 994 if (priv->type == RCAR_GEN3_SATA) {
986 iowrite32(0, base + SATAINTSTAT_REG); 995 sata_rcar_gen2_phy_init(priv);
987 iowrite32(0x7ff, base + SATAINTMASK_REG); 996 sata_rcar_init_module(priv);
988 /* enable interrupts */ 997 } else {
989 iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG); 998 /* ack and mask */
999 iowrite32(0, base + SATAINTSTAT_REG);
1000 iowrite32(0x7ff, base + SATAINTMASK_REG);
1001
1002 /* enable interrupts */
1003 iowrite32(ATAPI_INT_ENABLE_SATAINT,
1004 base + ATAPI_INT_ENABLE_REG);
1005 }
990 1006
991 ata_host_resume(host); 1007 ata_host_resume(host);
992 1008
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index 9180b9bd5821..834509506ef6 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -97,7 +97,7 @@ static struct img_ascii_lcd_config boston_config = {
97static void malta_update(struct img_ascii_lcd_ctx *ctx) 97static void malta_update(struct img_ascii_lcd_ctx *ctx)
98{ 98{
99 unsigned int i; 99 unsigned int i;
100 int err; 100 int err = 0;
101 101
102 for (i = 0; i < ctx->cfg->num_chars; i++) { 102 for (i = 0; i < ctx->cfg->num_chars; i++) {
103 err = regmap_write(ctx->regmap, 103 err = regmap_write(ctx->regmap,
@@ -180,7 +180,7 @@ static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
180static void sead3_update(struct img_ascii_lcd_ctx *ctx) 180static void sead3_update(struct img_ascii_lcd_ctx *ctx)
181{ 181{
182 unsigned int i; 182 unsigned int i;
183 int err; 183 int err = 0;
184 184
185 for (i = 0; i < ctx->cfg->num_chars; i++) { 185 for (i = 0; i < ctx->cfg->num_chars; i++) {
186 err = sead3_wait_lcd_idle(ctx); 186 err = sead3_wait_lcd_idle(ctx);
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches);
224 224
225/** 225/**
226 * img_ascii_lcd_scroll() - scroll the display by a character 226 * img_ascii_lcd_scroll() - scroll the display by a character
227 * @arg: really a pointer to the private data structure 227 * @t: really a pointer to the private data structure
228 * 228 *
229 * Scroll the current message along the LCD by one character, rearming the 229 * Scroll the current message along the LCD by one character, rearming the
230 * timer if required. 230 * timer if required.
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index ea7869c0d7f9..ec5e8800f8ad 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1372,7 +1372,7 @@ static void panel_process_inputs(void)
1372 break; 1372 break;
1373 input->rise_timer = 0; 1373 input->rise_timer = 0;
1374 input->state = INPUT_ST_RISING; 1374 input->state = INPUT_ST_RISING;
1375 /* no break here, fall through */ 1375 /* fall through */
1376 case INPUT_ST_RISING: 1376 case INPUT_ST_RISING:
1377 if ((phys_curr & input->mask) != input->value) { 1377 if ((phys_curr & input->mask) != input->value) {
1378 input->state = INPUT_ST_LOW; 1378 input->state = INPUT_ST_LOW;
@@ -1385,11 +1385,11 @@ static void panel_process_inputs(void)
1385 } 1385 }
1386 input->high_timer = 0; 1386 input->high_timer = 0;
1387 input->state = INPUT_ST_HIGH; 1387 input->state = INPUT_ST_HIGH;
1388 /* no break here, fall through */ 1388 /* fall through */
1389 case INPUT_ST_HIGH: 1389 case INPUT_ST_HIGH:
1390 if (input_state_high(input)) 1390 if (input_state_high(input))
1391 break; 1391 break;
1392 /* no break here, fall through */ 1392 /* fall through */
1393 case INPUT_ST_FALLING: 1393 case INPUT_ST_FALLING:
1394 input_state_falling(input); 1394 input_state_falling(input);
1395 } 1395 }
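
The panel.c hunks only reword the fall-through comments into the exact form GCC's -Wimplicit-fallthrough recognizes. A minimal example of a deliberate fall-through annotated that way:

#include <stdio.h>

static const char *classify(int state)
{
        switch (state) {
        case 0:
                puts("case 0 work runs first");
                /* fall through */
        case 1:
                return "handled";          /* shared with case 0 */
        default:
                return "ignored";
        }
}

int main(void)
{
        printf("%s\n", classify(0));
        return 0;
}
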
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 87855b5123a6..ee62d2d517bf 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
266 struct iov_iter i; 266 struct iov_iter i;
267 ssize_t bw; 267 ssize_t bw;
268 268
269 iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); 269 iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
270 270
271 file_start_write(file); 271 file_start_write(file);
272 bw = vfs_iter_write(file, &i, ppos, 0); 272 bw = vfs_iter_write(file, &i, ppos, 0);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e126e4cac2ca..92ec1bbece51 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock);
262 262
263static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); 263static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
264static void blkfront_gather_backend_features(struct blkfront_info *info); 264static void blkfront_gather_backend_features(struct blkfront_info *info);
265static int negotiate_mq(struct blkfront_info *info);
265 266
266static int get_id_from_freelist(struct blkfront_ring_info *rinfo) 267static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
267{ 268{
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev,
1774 unsigned int i, max_page_order; 1775 unsigned int i, max_page_order;
1775 unsigned int ring_page_order; 1776 unsigned int ring_page_order;
1776 1777
1778 if (!info)
1779 return -ENODEV;
1780
1777 max_page_order = xenbus_read_unsigned(info->xbdev->otherend, 1781 max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
1778 "max-ring-page-order", 0); 1782 "max-ring-page-order", 0);
1779 ring_page_order = min(xen_blkif_max_ring_order, max_page_order); 1783 ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1780 info->nr_ring_pages = 1 << ring_page_order; 1784 info->nr_ring_pages = 1 << ring_page_order;
1781 1785
1786 err = negotiate_mq(info);
1787 if (err)
1788 goto destroy_blkring;
1789
1782 for (i = 0; i < info->nr_rings; i++) { 1790 for (i = 0; i < info->nr_rings; i++) {
1783 struct blkfront_ring_info *rinfo = &info->rinfo[i]; 1791 struct blkfront_ring_info *rinfo = &info->rinfo[i];
1784 1792
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev,
1978 } 1986 }
1979 1987
1980 info->xbdev = dev; 1988 info->xbdev = dev;
1981 err = negotiate_mq(info);
1982 if (err) {
1983 kfree(info);
1984 return err;
1985 }
1986 1989
1987 mutex_init(&info->mutex); 1990 mutex_init(&info->mutex);
1988 info->vdevice = vdevice; 1991 info->vdevice = vdevice;
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev)
2099 2102
2100 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 2103 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2101 2104
2102 err = negotiate_mq(info);
2103 if (err)
2104 return err;
2105
2106 err = talk_to_blkback(dev, info); 2105 err = talk_to_blkback(dev, info);
2107 if (!err) 2106 if (!err)
2108 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); 2107 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fa4ce83893bb..5cd868ea28ed 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -231,7 +231,6 @@ static const struct usb_device_id blacklist_table[] = {
231 { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, 231 { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
232 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, 232 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
233 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, 233 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
234 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
235 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 234 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
236 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 235 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
237 { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, 236 { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -264,6 +263,7 @@ static const struct usb_device_id blacklist_table[] = {
264 { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, 263 { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
265 264
266 /* QCA ROME chipset */ 265 /* QCA ROME chipset */
266 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
267 { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, 267 { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
268 { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME }, 268 { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
269 { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME }, 269 { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
@@ -390,10 +390,10 @@ static const struct usb_device_id blacklist_table[] = {
390 */ 390 */
391static const struct dmi_system_id btusb_needs_reset_resume_table[] = { 391static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
392 { 392 {
393 /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */ 393 /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */
394 .matches = { 394 .matches = {
395 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 395 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
396 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"), 396 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
397 }, 397 },
398 }, 398 },
399 {} 399 {}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 6314dfb02969..40b9fb247010 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -244,7 +244,9 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
244 244
245 bt_dev_dbg(bdev, "Host wake IRQ"); 245 bt_dev_dbg(bdev, "Host wake IRQ");
246 246
247 pm_request_resume(bdev->dev); 247 pm_runtime_get(bdev->dev);
248 pm_runtime_mark_last_busy(bdev->dev);
249 pm_runtime_put_autosuspend(bdev->dev);
248 250
249 return IRQ_HANDLED; 251 return IRQ_HANDLED;
250} 252}
@@ -301,7 +303,7 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
301 .usb_auto_sleep = 0, 303 .usb_auto_sleep = 0,
302 .usb_resume_timeout = 0, 304 .usb_resume_timeout = 0,
303 .break_to_host = 0, 305 .break_to_host = 0,
304 .pulsed_host_wake = 0, 306 .pulsed_host_wake = 1,
305}; 307};
306 308
307static int bcm_setup_sleep(struct hci_uart *hu) 309static int bcm_setup_sleep(struct hci_uart *hu)
@@ -586,8 +588,11 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
586 } else if (!bcm->rx_skb) { 588 } else if (!bcm->rx_skb) {
587 /* Delay auto-suspend when receiving completed packet */ 589 /* Delay auto-suspend when receiving completed packet */
588 mutex_lock(&bcm_device_lock); 590 mutex_lock(&bcm_device_lock);
589 if (bcm->dev && bcm_device_exists(bcm->dev)) 591 if (bcm->dev && bcm_device_exists(bcm->dev)) {
590 pm_request_resume(bcm->dev->dev); 592 pm_runtime_get(bcm->dev->dev);
593 pm_runtime_mark_last_busy(bcm->dev->dev);
594 pm_runtime_put_autosuspend(bcm->dev->dev);
595 }
591 mutex_unlock(&bcm_device_lock); 596 mutex_unlock(&bcm_device_lock);
592 } 597 }
593 598
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 44301a3d9963..a07f6451694a 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -449,17 +449,17 @@ struct bcm2835_pll_ana_bits {
449static const struct bcm2835_pll_ana_bits bcm2835_ana_default = { 449static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {
450 .mask0 = 0, 450 .mask0 = 0,
451 .set0 = 0, 451 .set0 = 0,
452 .mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK), 452 .mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK,
453 .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT), 453 .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT),
454 .mask3 = (u32)~A2W_PLL_KA_MASK, 454 .mask3 = A2W_PLL_KA_MASK,
455 .set3 = (2 << A2W_PLL_KA_SHIFT), 455 .set3 = (2 << A2W_PLL_KA_SHIFT),
456 .fb_prediv_mask = BIT(14), 456 .fb_prediv_mask = BIT(14),
457}; 457};
458 458
459static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = { 459static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = {
460 .mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK), 460 .mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK,
461 .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT), 461 .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT),
462 .mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK), 462 .mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK,
463 .set1 = (6 << A2W_PLLH_KP_SHIFT), 463 .set1 = (6 << A2W_PLLH_KP_SHIFT),
464 .mask3 = 0, 464 .mask3 = 0,
465 .set3 = 0, 465 .set3 = 0,
@@ -623,8 +623,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
623 ~A2W_PLL_CTRL_PWRDN); 623 ~A2W_PLL_CTRL_PWRDN);
624 624
625 /* Take the PLL out of reset. */ 625 /* Take the PLL out of reset. */
626 spin_lock(&cprman->regs_lock);
626 cprman_write(cprman, data->cm_ctrl_reg, 627 cprman_write(cprman, data->cm_ctrl_reg,
627 cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); 628 cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
629 spin_unlock(&cprman->regs_lock);
628 630
629 /* Wait for the PLL to lock. */ 631 /* Wait for the PLL to lock. */
630 timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); 632 timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
@@ -701,9 +703,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
701 } 703 }
702 704
703 /* Unmask the reference clock from the oscillator. */ 705 /* Unmask the reference clock from the oscillator. */
706 spin_lock(&cprman->regs_lock);
704 cprman_write(cprman, A2W_XOSC_CTRL, 707 cprman_write(cprman, A2W_XOSC_CTRL,
705 cprman_read(cprman, A2W_XOSC_CTRL) | 708 cprman_read(cprman, A2W_XOSC_CTRL) |
706 data->reference_enable_mask); 709 data->reference_enable_mask);
710 spin_unlock(&cprman->regs_lock);
707 711
708 if (do_ana_setup_first) 712 if (do_ana_setup_first)
709 bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); 713 bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
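
The bcm2835 fix stores plain (positive) field masks instead of pre-inverted ones, leaving the inversion to the read-modify-write site where it belongs. The idiom, sketched with made-up field positions and the register modeled as a variable:

#include <stdint.h>
#include <stdio.h>

#define KI_MASK 0x00000380u                /* made-up field positions */
#define KP_MASK 0x00003c00u

static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t set)
{
        return (reg & ~mask) | set;        /* invert at the use site */
}

int main(void)
{
        uint32_t ana1 = 0xffffffffu;       /* pretend register content */

        ana1 = rmw(ana1, KI_MASK | KP_MASK, (2u << 7) | (8u << 10));
        printf("ana1 = 0x%08x\n", ana1);   /* fields programmed, rest kept */
        return 0;
}
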
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 9f7f931d6b2f..5eb50c31e455 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -205,6 +205,18 @@ static const struct aspeed_clk_soc_data ast2400_data = {
205 .calc_pll = aspeed_ast2400_calc_pll, 205 .calc_pll = aspeed_ast2400_calc_pll,
206}; 206};
207 207
208static int aspeed_clk_is_enabled(struct clk_hw *hw)
209{
210 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
211 u32 clk = BIT(gate->clock_idx);
212 u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
213 u32 reg;
214
215 regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
216
217 return ((reg & clk) == enval) ? 1 : 0;
218}
219
208static int aspeed_clk_enable(struct clk_hw *hw) 220static int aspeed_clk_enable(struct clk_hw *hw)
209{ 221{
210 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); 222 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
@@ -215,6 +227,11 @@ static int aspeed_clk_enable(struct clk_hw *hw)
215 227
216 spin_lock_irqsave(gate->lock, flags); 228 spin_lock_irqsave(gate->lock, flags);
217 229
230 if (aspeed_clk_is_enabled(hw)) {
231 spin_unlock_irqrestore(gate->lock, flags);
232 return 0;
233 }
234
218 if (gate->reset_idx >= 0) { 235 if (gate->reset_idx >= 0) {
219 /* Put IP in reset */ 236 /* Put IP in reset */
220 regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst); 237 regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst);
@@ -255,17 +272,6 @@ static void aspeed_clk_disable(struct clk_hw *hw)
255 spin_unlock_irqrestore(gate->lock, flags); 272 spin_unlock_irqrestore(gate->lock, flags);
256} 273}
257 274
258static int aspeed_clk_is_enabled(struct clk_hw *hw)
259{
260 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
261 u32 clk = BIT(gate->clock_idx);
262 u32 reg;
263
264 regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
265
266 return (reg & clk) ? 0 : 1;
267}
268
269static const struct clk_ops aspeed_clk_gate_ops = { 275static const struct clk_ops aspeed_clk_gate_ops = {
270 .enable = aspeed_clk_enable, 276 .enable = aspeed_clk_enable,
271 .disable = aspeed_clk_disable, 277 .disable = aspeed_clk_disable,
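
The aspeed fix makes is_enabled() honor gate polarity: on a CLK_GATE_SET_TO_DISABLE gate a set bit means stopped, so the register value that signals "enabled" is 0, not the bit itself. A standalone truth-table check of that predicate:

#include <stdint.h>
#include <stdio.h>

#define CLK_GATE_SET_TO_DISABLE 0x1u

static int gate_is_enabled(uint32_t reg, uint32_t bit, unsigned int flags)
{
        /* on set-to-disable gates, the "running" register value is 0 */
        uint32_t enval = (flags & CLK_GATE_SET_TO_DISABLE) ? 0 : bit;

        return (reg & bit) == enval;
}

int main(void)
{
        /* bit set on a set-to-disable gate => clock actually stopped */
        printf("set: %d  clear: %d\n",
               gate_is_enabled(0x4, 0x4, CLK_GATE_SET_TO_DISABLE),
               gate_is_enabled(0x0, 0x4, CLK_GATE_SET_TO_DISABLE));
        return 0;
}
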
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0f686a9dac3e..076d4244d672 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1125,8 +1125,10 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
1125{ 1125{
1126 lockdep_assert_held(&prepare_lock); 1126 lockdep_assert_held(&prepare_lock);
1127 1127
1128 if (!core) 1128 if (!core) {
1129 req->rate = 0;
1129 return 0; 1130 return 0;
1131 }
1130 1132
1131 clk_core_init_rate_req(core, req); 1133 clk_core_init_rate_req(core, req);
1132 1134
@@ -2309,8 +2311,11 @@ static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2309 2311
2310 trace_clk_set_phase(core, degrees); 2312 trace_clk_set_phase(core, degrees);
2311 2313
2312 if (core->ops->set_phase) 2314 if (core->ops->set_phase) {
2313 ret = core->ops->set_phase(core->hw, degrees); 2315 ret = core->ops->set_phase(core->hw, degrees);
2316 if (!ret)
2317 core->phase = degrees;
2318 }
2314 2319
2315 trace_clk_set_phase_complete(core, degrees); 2320 trace_clk_set_phase_complete(core, degrees);
2316 2321
@@ -2968,22 +2973,37 @@ static int __clk_core_init(struct clk_core *core)
2968 core->rate = core->req_rate = rate; 2973 core->rate = core->req_rate = rate;
2969 2974
2970 /* 2975 /*
2976 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
2977 * don't get accidentally disabled when walking the orphan tree and
2978 * reparenting clocks
2979 */
2980 if (core->flags & CLK_IS_CRITICAL) {
2981 unsigned long flags;
2982
2983 clk_core_prepare(core);
2984
2985 flags = clk_enable_lock();
2986 clk_core_enable(core);
2987 clk_enable_unlock(flags);
2988 }
2989
2990 /*
2971 * walk the list of orphan clocks and reparent any that newly finds a 2991 * walk the list of orphan clocks and reparent any that newly finds a
2972 * parent. 2992 * parent.
2973 */ 2993 */
2974 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { 2994 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
2975 struct clk_core *parent = __clk_init_parent(orphan); 2995 struct clk_core *parent = __clk_init_parent(orphan);
2976 unsigned long flags;
2977 2996
2978 /* 2997 /*
2979 * we could call __clk_set_parent, but that would result in a 2998 * We need to use __clk_set_parent_before() and _after() to
 2980 * redundant call to the .set_rate op, if it exists 2999 * properly migrate any prepare/enable count of the orphan
3000 * clock. This is important for CLK_IS_CRITICAL clocks, which
3001 * are enabled during init but might not have a parent yet.
2981 */ 3002 */
2982 if (parent) { 3003 if (parent) {
2983 /* update the clk tree topology */ 3004 /* update the clk tree topology */
2984 flags = clk_enable_lock(); 3005 __clk_set_parent_before(orphan, parent);
2985 clk_reparent(orphan, parent); 3006 __clk_set_parent_after(orphan, parent, NULL);
2986 clk_enable_unlock(flags);
2987 __clk_recalc_accuracies(orphan); 3007 __clk_recalc_accuracies(orphan);
2988 __clk_recalc_rates(orphan, 0); 3008 __clk_recalc_rates(orphan, 0);
2989 } 3009 }
@@ -3000,16 +3020,6 @@ static int __clk_core_init(struct clk_core *core)
3000 if (core->ops->init) 3020 if (core->ops->init)
3001 core->ops->init(core->hw); 3021 core->ops->init(core->hw);
3002 3022
3003 if (core->flags & CLK_IS_CRITICAL) {
3004 unsigned long flags;
3005
3006 clk_core_prepare(core);
3007
3008 flags = clk_enable_lock();
3009 clk_core_enable(core);
3010 clk_enable_unlock(flags);
3011 }
3012
3013 kref_init(&core->ref); 3023 kref_init(&core->ref);
3014out: 3024out:
3015 clk_pm_runtime_put(core); 3025 clk_pm_runtime_put(core);
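
The clk core reordering exists because a CLK_IS_CRITICAL clock is now enabled while it may still be an orphan, so adopting it must migrate its prepare/enable count up to the new parent, which is what __clk_set_parent_before()/_after() handle. A toy model of why a bare pointer swap would leave the parent with a zero count:

#include <stdio.h>

struct clk { const char *name; int enable_count; struct clk *parent; };

static void clk_enable(struct clk *c)
{
        for (; c; c = c->parent)           /* enables ripple up the tree */
                c->enable_count++;
}

static void adopt(struct clk *child, struct clk *parent)
{
        struct clk *p;

        /* migrate the orphan's enable count before switching the pointer,
         * roughly what __clk_set_parent_before()/_after() take care of */
        for (p = parent; child->enable_count && p; p = p->parent)
                p->enable_count += child->enable_count;
        child->parent = parent;
}

int main(void)
{
        struct clk root = { "root", 0, NULL };
        struct clk orphan = { "critical", 0, NULL };

        clk_enable(&orphan);               /* critical clk enabled at init */
        adopt(&orphan, &root);
        printf("root enable_count = %d\n", root.enable_count);  /* 1, not 0 */
        return 0;
}
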
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index 9b6c72bbddf9..e8b2c43b1bb8 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -149,6 +149,8 @@ static int hi3660_stub_clk_probe(struct platform_device *pdev)
149 return PTR_ERR(stub_clk_chan.mbox); 149 return PTR_ERR(stub_clk_chan.mbox);
150 150
151 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 151 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
152 if (!res)
153 return -EINVAL;
152 freq_reg = devm_ioremap(dev, res->start, resource_size(res)); 154 freq_reg = devm_ioremap(dev, res->start, resource_size(res));
153 if (!freq_reg) 155 if (!freq_reg)
154 return -ENOMEM; 156 return -ENOMEM;
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index c864992e6983..caa8bd40692c 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -131,7 +131,17 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
131static struct clk *clk[IMX5_CLK_END]; 131static struct clk *clk[IMX5_CLK_END];
132static struct clk_onecell_data clk_data; 132static struct clk_onecell_data clk_data;
133 133
134static struct clk ** const uart_clks[] __initconst = { 134static struct clk ** const uart_clks_mx51[] __initconst = {
135 &clk[IMX5_CLK_UART1_IPG_GATE],
136 &clk[IMX5_CLK_UART1_PER_GATE],
137 &clk[IMX5_CLK_UART2_IPG_GATE],
138 &clk[IMX5_CLK_UART2_PER_GATE],
139 &clk[IMX5_CLK_UART3_IPG_GATE],
140 &clk[IMX5_CLK_UART3_PER_GATE],
141 NULL
142};
143
144static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
135 &clk[IMX5_CLK_UART1_IPG_GATE], 145 &clk[IMX5_CLK_UART1_IPG_GATE],
136 &clk[IMX5_CLK_UART1_PER_GATE], 146 &clk[IMX5_CLK_UART1_PER_GATE],
137 &clk[IMX5_CLK_UART2_IPG_GATE], 147 &clk[IMX5_CLK_UART2_IPG_GATE],
@@ -321,8 +331,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
321 clk_prepare_enable(clk[IMX5_CLK_TMAX1]); 331 clk_prepare_enable(clk[IMX5_CLK_TMAX1]);
322 clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */ 332 clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */
323 clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */ 333 clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */
324
325 imx_register_uart_clocks(uart_clks);
326} 334}
327 335
328static void __init mx50_clocks_init(struct device_node *np) 336static void __init mx50_clocks_init(struct device_node *np)
@@ -388,6 +396,8 @@ static void __init mx50_clocks_init(struct device_node *np)
388 396
389 r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000); 397 r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
390 clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r); 398 clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
399
400 imx_register_uart_clocks(uart_clks_mx50_mx53);
391} 401}
392CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init); 402CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
393 403
@@ -477,6 +487,8 @@ static void __init mx51_clocks_init(struct device_node *np)
477 val = readl(MXC_CCM_CLPCR); 487 val = readl(MXC_CCM_CLPCR);
478 val |= 1 << 23; 488 val |= 1 << 23;
479 writel(val, MXC_CCM_CLPCR); 489 writel(val, MXC_CCM_CLPCR);
490
491 imx_register_uart_clocks(uart_clks_mx51);
480} 492}
481CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init); 493CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
482 494
@@ -606,5 +618,7 @@ static void __init mx53_clocks_init(struct device_node *np)
606 618
607 r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000); 619 r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
608 clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r); 620 clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
621
622 imx_register_uart_clocks(uart_clks_mx50_mx53);
609} 623}
610CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init); 624CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c
index 246957f1a413..b1cc8dbcd327 100644
--- a/drivers/clk/qcom/apcs-msm8916.c
+++ b/drivers/clk/qcom/apcs-msm8916.c
@@ -49,11 +49,10 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
49 struct clk_regmap_mux_div *a53cc; 49 struct clk_regmap_mux_div *a53cc;
50 struct regmap *regmap; 50 struct regmap *regmap;
51 struct clk_init_data init = { }; 51 struct clk_init_data init = { };
52 int ret; 52 int ret = -ENODEV;
53 53
54 regmap = dev_get_regmap(parent, NULL); 54 regmap = dev_get_regmap(parent, NULL);
55 if (IS_ERR(regmap)) { 55 if (!regmap) {
56 ret = PTR_ERR(regmap);
57 dev_err(dev, "failed to get regmap: %d\n", ret); 56 dev_err(dev, "failed to get regmap: %d\n", ret);
58 return ret; 57 return ret;
59 } 58 }
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 72b16ed1012b..3b97f60540ad 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -762,7 +762,7 @@ static struct ccu_mp out_a_clk = {
762 .features = CCU_FEATURE_FIXED_PREDIV, 762 .features = CCU_FEATURE_FIXED_PREDIV,
763 .hw.init = CLK_HW_INIT_PARENTS("out-a", 763 .hw.init = CLK_HW_INIT_PARENTS("out-a",
764 clk_out_parents, 764 clk_out_parents,
765 &ccu_div_ops, 765 &ccu_mp_ops,
766 0), 766 0),
767 }, 767 },
768}; 768};
@@ -783,7 +783,7 @@ static struct ccu_mp out_b_clk = {
783 .features = CCU_FEATURE_FIXED_PREDIV, 783 .features = CCU_FEATURE_FIXED_PREDIV,
784 .hw.init = CLK_HW_INIT_PARENTS("out-b", 784 .hw.init = CLK_HW_INIT_PARENTS("out-b",
785 clk_out_parents, 785 clk_out_parents,
786 &ccu_div_ops, 786 &ccu_mp_ops,
787 0), 787 0),
788 }, 788 },
789}; 789};
@@ -804,7 +804,7 @@ static struct ccu_mp out_c_clk = {
804 .features = CCU_FEATURE_FIXED_PREDIV, 804 .features = CCU_FEATURE_FIXED_PREDIV,
805 .hw.init = CLK_HW_INIT_PARENTS("out-c", 805 .hw.init = CLK_HW_INIT_PARENTS("out-c",
806 clk_out_parents, 806 clk_out_parents,
807 &ccu_div_ops, 807 &ccu_mp_ops,
808 0), 808 0),
809 }, 809 },
810}; 810};
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 612491a26070..12e0a2d19911 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -45,7 +45,7 @@ static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = {
45 45
46static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = { 46static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = {
47 { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, 47 { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
48 { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP, "lcd_gclk", "lcdc_clkdm" }, 48 { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" },
49 { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" }, 49 { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" },
50 { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, 50 { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
51 { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" }, 51 { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" },
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 2b7c2e017665..63c5ddb50187 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -187,7 +187,7 @@ static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst
187 { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, 187 { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
188 { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, 188 { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
189 { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" }, 189 { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" },
190 { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "disp_clk", "dss_clkdm" }, 190 { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" },
191 { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, 191 { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
192 { 0 }, 192 { 0 },
193}; 193};
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index afa0d6bfc5c1..421b05392220 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -537,6 +537,8 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
537 init.parent_names = &reg_data->parent; 537 init.parent_names = &reg_data->parent;
538 init.num_parents = 1; 538 init.num_parents = 1;
539 init.flags = 0; 539 init.flags = 0;
540 if (reg_data->flags & CLKF_SET_RATE_PARENT)
541 init.flags |= CLK_SET_RATE_PARENT;
540 init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", 542 init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d",
541 node->parent->name, node->name, 543 node->parent->name, node->name,
542 reg_data->offset, 0); 544 reg_data->offset, 0);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index b3b4ed9b6874..d2e5382821a4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -386,6 +386,7 @@ config ATMEL_PIT
386 386
387config ATMEL_ST 387config ATMEL_ST
388 bool "Atmel ST timer support" if COMPILE_TEST 388 bool "Atmel ST timer support" if COMPILE_TEST
389 depends on HAS_IOMEM
389 select TIMER_OF 390 select TIMER_OF
390 select MFD_SYSCON 391 select MFD_SYSCON
391 help 392 help
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f652a0e0f5a2..3548caa9e933 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -163,6 +163,7 @@ struct mv_xor_v2_device {
163 void __iomem *dma_base; 163 void __iomem *dma_base;
164 void __iomem *glob_base; 164 void __iomem *glob_base;
165 struct clk *clk; 165 struct clk *clk;
166 struct clk *reg_clk;
166 struct tasklet_struct irq_tasklet; 167 struct tasklet_struct irq_tasklet;
167 struct list_head free_sw_desc; 168 struct list_head free_sw_desc;
168 struct dma_device dmadev; 169 struct dma_device dmadev;
@@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
+	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
+		if (!IS_ERR(xor_dev->reg_clk)) {
+			ret = clk_prepare_enable(xor_dev->reg_clk);
+			if (ret)
+				return ret;
+		} else {
+			return PTR_ERR(xor_dev->reg_clk);
+		}
+	}
+
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto disable_reg_clk;
+	}
 	if (!IS_ERR(xor_dev->clk)) {
 		ret = clk_prepare_enable(xor_dev->clk);
 		if (ret)
-			return ret;
+			goto disable_reg_clk;
 	}
 
 	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
@@ -866,8 +880,9 @@ free_hw_desq:
 free_msi_irqs:
 	platform_msi_domain_free_irqs(&pdev->dev);
 disable_clk:
-	if (!IS_ERR(xor_dev->clk))
-		clk_disable_unprepare(xor_dev->clk);
+	clk_disable_unprepare(xor_dev->clk);
+disable_reg_clk:
+	clk_disable_unprepare(xor_dev->reg_clk);
 	return ret;
 }
 
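
For reference, the reg-clock hunk above is an instance of the optional-clock idiom: a named clock may legitimately be absent from the device tree, in which case devm_clk_get() returns ERR_PTR(-ENOENT) and the driver carries on without it, while any other error is still propagated. A condensed sketch (not from this patch):

	reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (IS_ERR(reg_clk)) {
		if (PTR_ERR(reg_clk) != -ENOENT)
			return PTR_ERR(reg_clk);	/* real error, incl. -EPROBE_DEFER */
		reg_clk = NULL;				/* clock simply not wired up */
	}
	ret = clk_prepare_enable(reg_clk);		/* NULL-safe no-op when absent */
	if (ret)
		return ret;
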
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index e3ff162c03fc..d0cacdb0713e 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -917,7 +917,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 
 	rcar_dmac_chan_configure_desc(chan, desc);
 
-	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+	max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
 
 	/*
 	 * Allocate and fill the transfer chunk descriptors. We own the only
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index c16600f30611..0bdea60c65dd 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -639,7 +639,7 @@ static void __exit dcdbas_exit(void)
 	platform_driver_unregister(&dcdbas_driver);
 }
 
-module_init(dcdbas_init);
+subsys_initcall_sync(dcdbas_init);
 module_exit(dcdbas_exit);
 
 MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
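
The initcall change only matters when dcdbas is built in: module_init() then maps to device_initcall(), while the built-in levels run in a fixed order, so moving up to subsys_initcall_sync makes the SMI interface available before ordinary drivers probe. As a module the behaviour is unchanged, since every initcall macro degrades to module_init() there. Sketch of the ordering (abridged):

	/* built-in initcall levels, earliest first:
	 *   core -> postcore -> arch -> subsys -> subsys_sync ->
	 *   fs -> device (module_init) -> late
	 */
	subsys_initcall_sync(dcdbas_init);
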
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index da661bf8cb96..13c1edd37e96 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -68,11 +68,11 @@ void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
 	efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
 	efi_status_t status;
 	efi_physical_addr_t log_location, log_last_entry;
-	struct linux_efi_tpm_eventlog *log_tbl;
+	struct linux_efi_tpm_eventlog *log_tbl = NULL;
 	unsigned long first_entry_addr, last_entry_addr;
 	size_t log_size, last_entry_size;
 	efi_bool_t truncated;
-	void *tcg2_protocol;
+	void *tcg2_protocol = NULL;
 
 	status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
 				&tcg2_protocol);
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index e76de57dd617..ebaea8b1594b 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -14,7 +14,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/init.h>
@@ -37,10 +36,9 @@ struct gpio_rcar_priv {
 	struct platform_device *pdev;
 	struct gpio_chip gpio_chip;
 	struct irq_chip irq_chip;
-	struct clk *clk;
 	unsigned int irq_parent;
+	atomic_t wakeup_path;
 	bool has_both_edge_trigger;
-	bool needs_clk;
 };
 
 #define IOINTSEL 0x00	/* General IO/Interrupt Switching Register */
@@ -186,13 +184,10 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
 		}
 	}
 
-	if (!p->clk)
-		return 0;
-
 	if (on)
-		clk_enable(p->clk);
+		atomic_inc(&p->wakeup_path);
 	else
-		clk_disable(p->clk);
+		atomic_dec(&p->wakeup_path);
 
 	return 0;
 }
@@ -330,17 +325,14 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
 
 struct gpio_rcar_info {
 	bool has_both_edge_trigger;
-	bool needs_clk;
 };
 
 static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
 	.has_both_edge_trigger = false,
-	.needs_clk = false,
 };
 
 static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
 	.has_both_edge_trigger = true,
-	.needs_clk = true,
 };
 
 static const struct of_device_id gpio_rcar_of_table[] = {
@@ -403,7 +395,6 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
 	ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
 	*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
 	p->has_both_edge_trigger = info->has_both_edge_trigger;
-	p->needs_clk = info->needs_clk;
 
 	if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
 		dev_warn(&p->pdev->dev,
@@ -440,16 +431,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, p);
 
-	p->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(p->clk)) {
-		if (p->needs_clk) {
-			dev_err(dev, "unable to get clock\n");
-			ret = PTR_ERR(p->clk);
-			goto err0;
-		}
-		p->clk = NULL;
-	}
-
 	pm_runtime_enable(dev);
 
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -531,11 +512,24 @@ static int gpio_rcar_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static int __maybe_unused gpio_rcar_suspend(struct device *dev)
+{
+	struct gpio_rcar_priv *p = dev_get_drvdata(dev);
+
+	if (atomic_read(&p->wakeup_path))
+		device_set_wakeup_path(dev);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, NULL);
+
 static struct platform_driver gpio_rcar_device_driver = {
 	.probe = gpio_rcar_probe,
 	.remove = gpio_rcar_remove,
 	.driver = {
 		.name = "gpio_rcar",
+		.pm = &gpio_rcar_pm_ops,
 		.of_match_table = of_match_ptr(gpio_rcar_of_table),
 	}
 };
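
A sketch of the consumer side of this pattern (hypothetical client driver, names invented): requesting wake on a GPIO interrupt ends up in gpio_rcar_irq_set_wake(), which now only counts wake users; at system suspend gpio_rcar_suspend() flags the wakeup path so the PM core keeps the controller powered, instead of the driver pinning a clock itself.

	ret = devm_request_irq(dev, irq, my_wake_isr, 0, "my-wake", priv);	/* my_wake_isr is hypothetical */
	if (ret)
		return ret;
	device_init_wakeup(dev, true);
	ret = enable_irq_wake(irq);	/* propagates to gpio_rcar_irq_set_wake() */
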
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 57afad79f55d..8fa850a070e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 	size_t size;
 	u32 retry = 3;
 
+	if (amdgpu_acpi_pcie_notify_device_ready(adev))
+		return -EINVAL;
+
 	/* Get the device handle */
 	handle = ACPI_HANDLE(&adev->pdev->dev);
 	if (!handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 74d2efaec52f..7a073ac5f9c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 		/* don't do anything if sink is not display port, i.e.,
 		 * passive dp->(dvi|hdmi) adaptor
 		 */
-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			int saved_dpms = connector->dpms;
-			/* Only turn off the display if it's physically disconnected */
-			if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-			} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
-				/* Don't try to start link training before we
-				 * have the dpcd */
-				if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
-					return;
-
-				/* set it to OFF so that drm_helper_connector_dpms()
-				 * won't return immediately since the current state
-				 * is ON at this point.
-				 */
-				connector->dpms = DRM_MODE_DPMS_OFF;
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-			}
-			connector->dpms = saved_dpms;
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+		    amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+				return;
+
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index af1b879a9ee9..66cb10cdc7c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2063,9 +2063,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
 	DRM_INFO("amdgpu: finishing device.\n");
 	adev->shutdown = true;
-	if (adev->mode_info.mode_config_initialized)
-		drm_crtc_force_disable_all(adev->ddev);
-
+	if (adev->mode_info.mode_config_initialized){
+		if (!amdgpu_device_has_dc_support(adev))
+			drm_crtc_force_disable_all(adev->ddev);
+		else
+			drm_atomic_helper_shutdown(adev->ddev);
+	}
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e48b4ec88c8c..ca6c931dabfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		amdgpu_mn_unregister(robj);
 		amdgpu_bo_unref(&robj);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 54f06c959340..2264c5c97009 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -352,6 +352,7 @@ struct amdgpu_mode_info {
 	u16 firmware_flags;
 	/* pointer to backlight encoder */
 	struct amdgpu_encoder *bl_encoder;
+	u8 bl_level; /* saved backlight level */
 	struct amdgpu_audio	audio; /* audio stuff */
 	int			num_crtc; /* number of crtcs */
 	int			num_hpd; /* number of hpd pins */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5c4c3e0d527b..1220322c1680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -56,6 +56,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 
 	amdgpu_bo_kunmap(bo);
 
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	amdgpu_bo_unref(&bo->parent);
 	if (!list_empty(&bo->shadow_list)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 13044e66dcaf..561d3312af32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -481,7 +481,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
 	result = 0;
 
 	if (*pos < 12) {
-		early[0] = amdgpu_ring_get_rptr(ring);
+		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
 		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
 		early[2] = ring->wptr & ring->buf_mask;
 		for (i = *pos / 4; i < 3 && size; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b2eae86bf906..5c26a8e806b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 
 	cancel_delayed_work_sync(&adev->uvd.idle_work);
 
-	for (i = 0; i < adev->uvd.max_handles; ++i)
-		if (atomic_read(&adev->uvd.handles[i]))
-			break;
+	/* only valid for physical mode */
+	if (adev->asic_type < CHIP_POLARIS10) {
+		for (i = 0; i < adev->uvd.max_handles; ++i)
+			if (atomic_read(&adev->uvd.handles[i]))
+				break;
 
-	if (i == AMDGPU_MAX_UVD_HANDLES)
-		return 0;
+		if (i == adev->uvd.max_handles)
+			return 0;
+	}
 
 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
 	ptr = adev->uvd.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2da127..d702fb8e3427 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -34,7 +34,7 @@
 #include <linux/backlight.h>
 #include "bif/bif_4_1_d.h"
 
-static u8
+u8
 amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 {
 	u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
 	return backlight_level;
 }
 
-static void
+void
 amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
 						   u8 backlight_level)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40515ce..f77cbdef679e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
 #define __ATOMBIOS_ENCODER_H__
 
 u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+						   u8 backlight_level);
+u8
 amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
 void
 amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index f34bc68aadfb..022f303463fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2921,6 +2921,11 @@ static int dce_v10_0_hw_fini(void *handle)
 
 static int dce_v10_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v10_0_hw_fini(handle);
 }
 
@@ -2929,6 +2934,9 @@ static int dce_v10_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v10_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 26378bd6aba4..800a9f36ab4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3047,6 +3047,11 @@ static int dce_v11_0_hw_fini(void *handle)
 
 static int dce_v11_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v11_0_hw_fini(handle);
 }
 
@@ -3055,6 +3060,9 @@ static int dce_v11_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v11_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index bd2c4f727df6..b8368f69ce1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2787,6 +2787,11 @@ static int dce_v6_0_hw_fini(void *handle)
 
 static int dce_v6_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v6_0_hw_fini(handle);
 }
 
@@ -2795,6 +2800,9 @@ static int dce_v6_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v6_0_hw_init(handle);
 
 	/* turn on the BL */
@@ -3093,7 +3101,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
 			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
 			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 			schedule_work(&adev->hotplug_work);
-			DRM_INFO("IH: HPD%d\n", hpd + 1);
+			DRM_DEBUG("IH: HPD%d\n", hpd + 1);
 		}
 
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c008dc030687..012e0a9ae0ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2819,6 +2819,11 @@ static int dce_v8_0_hw_fini(void *handle)
 
 static int dce_v8_0_suspend(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->mode_info.bl_level =
+		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
 	return dce_v8_0_hw_fini(handle);
 }
 
@@ -2827,6 +2832,9 @@ static int dce_v8_0_resume(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int ret;
 
+	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+							   adev->mode_info.bl_level);
+
 	ret = dce_v8_0_hw_init(handle);
 
 	/* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index a066c5eda135..a4309698e76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4384,34 +4384,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
 	case CHIP_KAVERI:
 		adev->gfx.config.max_shader_engines = 1;
 		adev->gfx.config.max_tile_pipes = 4;
-		if ((adev->pdev->device == 0x1304) ||
-		    (adev->pdev->device == 0x1305) ||
-		    (adev->pdev->device == 0x130C) ||
-		    (adev->pdev->device == 0x130F) ||
-		    (adev->pdev->device == 0x1310) ||
-		    (adev->pdev->device == 0x1311) ||
-		    (adev->pdev->device == 0x131C)) {
-			adev->gfx.config.max_cu_per_sh = 8;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1309) ||
-			   (adev->pdev->device == 0x130A) ||
-			   (adev->pdev->device == 0x130D) ||
-			   (adev->pdev->device == 0x1313) ||
-			   (adev->pdev->device == 0x131D)) {
-			adev->gfx.config.max_cu_per_sh = 6;
-			adev->gfx.config.max_backends_per_se = 2;
-		} else if ((adev->pdev->device == 0x1306) ||
-			   (adev->pdev->device == 0x1307) ||
-			   (adev->pdev->device == 0x130B) ||
-			   (adev->pdev->device == 0x130E) ||
-			   (adev->pdev->device == 0x1315) ||
-			   (adev->pdev->device == 0x131B)) {
-			adev->gfx.config.max_cu_per_sh = 4;
-			adev->gfx.config.max_backends_per_se = 1;
-		} else {
-			adev->gfx.config.max_cu_per_sh = 3;
-			adev->gfx.config.max_backends_per_se = 1;
-		}
+		adev->gfx.config.max_cu_per_sh = 8;
+		adev->gfx.config.max_backends_per_se = 2;
 		adev->gfx.config.max_sh_per_se = 1;
 		adev->gfx.config.max_texture_channel_caches = 4;
 		adev->gfx.config.max_gprs = 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 543101d5a5ed..2095173aaabf 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -31,6 +31,7 @@
31#include "amdgpu_uvd.h" 31#include "amdgpu_uvd.h"
32#include "amdgpu_vce.h" 32#include "amdgpu_vce.h"
33#include "atom.h" 33#include "atom.h"
34#include "amd_pcie.h"
34#include "amdgpu_powerplay.h" 35#include "amdgpu_powerplay.h"
35#include "sid.h" 36#include "sid.h"
36#include "si_ih.h" 37#include "si_ih.h"
@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 {
 	struct pci_dev *root = adev->pdev->bus->self;
 	int bridge_pos, gpu_pos;
-	u32 speed_cntl, mask, current_data_rate;
-	int ret, i;
+	u32 speed_cntl, current_data_rate;
+	int i;
 	u16 tmp16;
 
 	if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret != 0)
-		return;
-
-	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 		return;
 
 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
 		LC_CURRENT_DATA_RATE_SHIFT;
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate == 2) {
 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
 			return;
 		}
 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
-	} else if (mask & DRM_PCIE_SPEED_50) {
+	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
 		if (current_data_rate == 1) {
 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
 			return;
@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (!gpu_pos)
 		return;
 
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate != 2) {
 			u16 bridge_cfg, gpu_cfg;
 			u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 
 	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
 	tmp16 &= ~0xf;
-	if (mask & DRM_PCIE_SPEED_80)
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 		tmp16 |= 3;
-	else if (mask & DRM_PCIE_SPEED_50)
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
 		tmp16 |= 2;
 	else
 		tmp16 |= 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index ce675a7f179a..22f0b7ff3ac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -26,6 +26,7 @@
26#include "amdgpu_pm.h" 26#include "amdgpu_pm.h"
27#include "amdgpu_dpm.h" 27#include "amdgpu_dpm.h"
28#include "amdgpu_atombios.h" 28#include "amdgpu_atombios.h"
29#include "amd_pcie.h"
29#include "sid.h" 30#include "sid.h"
30#include "r600_dpm.h" 31#include "r600_dpm.h"
31#include "si_dpm.h" 32#include "si_dpm.h"
@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
 	}
 }
 
-static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
-						      u32 sys_mask,
-						      enum amdgpu_pcie_gen asic_gen,
-						      enum amdgpu_pcie_gen default_gen)
-{
-	switch (asic_gen) {
-	case AMDGPU_PCIE_GEN1:
-		return AMDGPU_PCIE_GEN1;
-	case AMDGPU_PCIE_GEN2:
-		return AMDGPU_PCIE_GEN2;
-	case AMDGPU_PCIE_GEN3:
-		return AMDGPU_PCIE_GEN3;
-	default:
-		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
-			return AMDGPU_PCIE_GEN3;
-		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
-			return AMDGPU_PCIE_GEN2;
-		else
-			return AMDGPU_PCIE_GEN1;
-	}
-	return AMDGPU_PCIE_GEN1;
-}
-
 static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
 				   u32 *p, u32 *u)
 {
@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
 							table->ACPIState.levels[0].vddc.index,
 							&table->ACPIState.levels[0].std_vddc);
 	}
-	table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
-									    si_pi->sys_pcie_mask,
-									    si_pi->boot_pcie_gen,
-									    AMDGPU_PCIE_GEN1);
+	table->ACPIState.levels[0].gen2PCIE =
+		(u8)amdgpu_get_pcie_gen_support(adev,
+						si_pi->sys_pcie_mask,
+						si_pi->boot_pcie_gen,
+						AMDGPU_PCIE_GEN1);
 
 	if (si_pi->vddc_phase_shed_control)
 		si_populate_phase_shedding_value(adev,
@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
 	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
 	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
 	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
-	pl->pcie_gen = r600_get_pcie_gen_support(adev,
-						 si_pi->sys_pcie_mask,
-						 si_pi->boot_pcie_gen,
-						 clock_info->si.ucPCIEGen);
+	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
+						   si_pi->sys_pcie_mask,
+						   si_pi->boot_pcie_gen,
+						   clock_info->si.ucPCIEGen);
 
 	/* patch up vddc if necessary */
 	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
 	int ret;
-	u32 mask;
 
 	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
 	if (si_pi == NULL)
@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret)
-		si_pi->sys_pcie_mask = 0;
-	else
-		si_pi->sys_pcie_mask = mask;
+	si_pi->sys_pcie_mask =
+		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
 	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
 	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
 
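
For context, amd_pcie.h keeps the link-speed capabilities in the upper half of adev->pm.pcie_gen_mask; the defines look roughly like the following (quoted from memory, so check the header before relying on exact values):

	#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1	0x00010000
	#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2	0x00020000
	#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3	0x00040000
	#define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK	0xFFFF0000
	#define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT	16

so the sys_pcie_mask assignment above simply shifts the capability bits down into the low word that the SMC tables expect.
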
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 862835dc054e..63c67346d316 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1037,6 +1037,10 @@ static void handle_hpd_rx_irq(void *param)
 			   !is_mst_root_connector) {
 			/* Downstream Port status changed. */
 			if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+
+				if (aconnector->fake_enable)
+					aconnector->fake_enable = false;
+
 				amdgpu_dm_update_connector_after_detect(aconnector);
 
 
@@ -2012,30 +2016,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 	dst.width = stream->timing.h_addressable;
 	dst.height = stream->timing.v_addressable;
 
-	rmx_type = dm_state->scaling;
-	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
-		if (src.width * dst.height <
-				src.height * dst.width) {
-			/* height needs less upscaling/more downscaling */
-			dst.width = src.width *
-					dst.height / src.height;
-		} else {
-			/* width needs less upscaling/more downscaling */
-			dst.height = src.height *
-					dst.width / src.width;
+	if (dm_state) {
+		rmx_type = dm_state->scaling;
+		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+			if (src.width * dst.height <
+					src.height * dst.width) {
+				/* height needs less upscaling/more downscaling */
+				dst.width = src.width *
+						dst.height / src.height;
+			} else {
+				/* width needs less upscaling/more downscaling */
+				dst.height = src.height *
+						dst.width / src.width;
+			}
+		} else if (rmx_type == RMX_CENTER) {
+			dst = src;
 		}
-	} else if (rmx_type == RMX_CENTER) {
-		dst = src;
-	}
 
-	dst.x = (stream->timing.h_addressable - dst.width) / 2;
-	dst.y = (stream->timing.v_addressable - dst.height) / 2;
+		dst.x = (stream->timing.h_addressable - dst.width) / 2;
+		dst.y = (stream->timing.v_addressable - dst.height) / 2;
 
-	if (dm_state->underscan_enable) {
-		dst.x += dm_state->underscan_hborder / 2;
-		dst.y += dm_state->underscan_vborder / 2;
-		dst.width -= dm_state->underscan_hborder;
-		dst.height -= dm_state->underscan_vborder;
+		if (dm_state->underscan_enable) {
+			dst.x += dm_state->underscan_hborder / 2;
+			dst.y += dm_state->underscan_vborder / 2;
+			dst.width -= dm_state->underscan_hborder;
+			dst.height -= dm_state->underscan_vborder;
+		}
 	}
 
 	stream->src = src;
@@ -2360,12 +2366,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 	if (aconnector == NULL) {
 		DRM_ERROR("aconnector is NULL!\n");
-		goto drm_connector_null;
-	}
-
-	if (dm_state == NULL) {
-		DRM_ERROR("dm_state is NULL!\n");
-		goto dm_state_null;
+		return stream;
 	}
 
 	drm_connector = &aconnector->base;
@@ -2377,18 +2378,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		 */
 		if (aconnector->mst_port) {
 			dm_dp_mst_dc_sink_create(drm_connector);
-			goto mst_dc_sink_create_done;
+			return stream;
 		}
 
 		if (create_fake_sink(aconnector))
-			goto stream_create_fail;
+			return stream;
 	}
 
 	stream = dc_create_stream_for_sink(aconnector->dc_sink);
 
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
-		goto stream_create_fail;
+		return stream;
 	}
 
2394 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 2395 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
@@ -2414,9 +2415,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2414 } else { 2415 } else {
2415 decide_crtc_timing_for_drm_display_mode( 2416 decide_crtc_timing_for_drm_display_mode(
2416 &mode, preferred_mode, 2417 &mode, preferred_mode,
2417 dm_state->scaling != RMX_OFF); 2418 dm_state ? (dm_state->scaling != RMX_OFF) : false);
2418 } 2419 }
2419 2420
2421 if (!dm_state)
2422 drm_mode_set_crtcinfo(&mode, 0);
2423
2420 fill_stream_properties_from_drm_display_mode(stream, 2424 fill_stream_properties_from_drm_display_mode(stream,
2421 &mode, &aconnector->base); 2425 &mode, &aconnector->base);
2422 update_stream_scaling_settings(&mode, dm_state, stream); 2426 update_stream_scaling_settings(&mode, dm_state, stream);
@@ -2426,10 +2430,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 			drm_connector,
 			aconnector->dc_sink);
 
-stream_create_fail:
-dm_state_null:
-drm_connector_null:
-mst_dc_sink_create_done:
+	update_stream_signal(stream);
+
 	return stream;
 }
 
@@ -2497,6 +2499,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 	return &state->base;
 }
 
+
+static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+{
+	enum dc_irq_source irq_source;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+
+	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+}
+
+static int dm_enable_vblank(struct drm_crtc *crtc)
+{
+	return dm_set_vblank(crtc, true);
+}
+
+static void dm_disable_vblank(struct drm_crtc *crtc)
+{
+	dm_set_vblank(crtc, false);
+}
+
 /* Implemented only the options currently availible for the driver */
 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.reset = dm_crtc_reset_state,
@@ -2506,6 +2529,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.page_flip = drm_atomic_helper_page_flip,
 	.atomic_duplicate_state = dm_crtc_duplicate_state,
 	.atomic_destroy_state = dm_crtc_destroy_state,
+	.enable_vblank = dm_enable_vblank,
+	.disable_vblank = dm_disable_vblank,
 };
 
 static enum drm_connector_status
@@ -2800,7 +2825,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
 		goto fail;
 	}
 
-	stream = dc_create_stream_for_sink(dc_sink);
+	stream = create_stream_for_sink(aconnector, mode, NULL);
 	if (stream == NULL) {
 		DRM_ERROR("Failed to create stream for sink!\n");
 		goto fail;
@@ -3060,6 +3085,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
 	if (!dm_plane_state->dc_state)
 		return 0;
 
+	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
+		return -EINVAL;
+
 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
 		return 0;
 
@@ -3106,8 +3134,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 
 	switch (aplane->base.type) {
 	case DRM_PLANE_TYPE_PRIMARY:
-		aplane->base.format_default = true;
-
 		res = drm_universal_plane_init(
 				dm->adev->ddev,
 				&aplane->base,
@@ -4632,8 +4658,6 @@ static int dm_update_planes_state(struct dc *dc,
 	bool pflip_needed = !state->allow_modeset;
 	int ret = 0;
 
-	if (pflip_needed)
-		return ret;
 
 	/* Add new planes */
 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
@@ -4648,6 +4672,8 @@ static int dm_update_planes_state(struct dc *dc,
 
 		/* Remove any changed/removed planes */
 		if (!enable) {
+			if (pflip_needed)
+				continue;
 
 			if (!old_plane_crtc)
 				continue;
@@ -4679,6 +4705,7 @@ static int dm_update_planes_state(struct dc *dc,
 			*lock_and_validation_needed = true;
 
 		} else { /* Add new planes */
+			struct dc_plane_state *dc_new_plane_state;
 
 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
 				continue;
@@ -4692,38 +4719,50 @@ static int dm_update_planes_state(struct dc *dc,
 			if (!dm_new_crtc_state->stream)
 				continue;
 
+			if (pflip_needed)
+				continue;
 
 			WARN_ON(dm_new_plane_state->dc_state);
 
-			dm_new_plane_state->dc_state = dc_create_plane_state(dc);
-
-			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
-					plane->base.id, new_plane_crtc->base.id);
-
-			if (!dm_new_plane_state->dc_state) {
+			dc_new_plane_state = dc_create_plane_state(dc);
+			if (!dc_new_plane_state) {
 				ret = -EINVAL;
 				return ret;
 			}
 
+			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+					 plane->base.id, new_plane_crtc->base.id);
+
 			ret = fill_plane_attributes(
 				new_plane_crtc->dev->dev_private,
-				dm_new_plane_state->dc_state,
+				dc_new_plane_state,
 				new_plane_state,
 				new_crtc_state);
-			if (ret)
+			if (ret) {
+				dc_plane_state_release(dc_new_plane_state);
 				return ret;
+			}
 
-
+			/*
+			 * Any atomic check errors that occur after this will
+			 * not need a release. The plane state will be attached
+			 * to the stream, and therefore part of the atomic
+			 * state. It'll be released when the atomic state is
+			 * cleaned.
+			 */
 			if (!dc_add_plane_to_context(
 					dc,
 					dm_new_crtc_state->stream,
-					dm_new_plane_state->dc_state,
+					dc_new_plane_state,
 					dm_state->context)) {
 
+				dc_plane_state_release(dc_new_plane_state);
 				ret = -EINVAL;
 				return ret;
 			}
 
+			dm_new_plane_state->dc_state = dc_new_plane_state;
+
 			/* Tell DC to do a full surface update every time there
 			 * is a plane change. Inefficient, but works for now.
 			 */
@@ -4737,6 +4776,33 @@ static int dm_update_planes_state(struct dc *dc,
 	return ret;
 }
 
+static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+					  struct drm_crtc *crtc)
+{
+	struct drm_plane *plane;
+	struct drm_crtc_state *crtc_state;
+
+	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+
+	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+		struct drm_plane_state *plane_state =
+			drm_atomic_get_plane_state(state, plane);
+
+		if (IS_ERR(plane_state))
+			return -EDEADLK;
+
+		crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		if (crtc->primary == plane && crtc_state->active) {
+			if (!plane_state->fb)
+				return -EINVAL;
+		}
+	}
+	return 0;
+}
+
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
@@ -4760,6 +4826,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		goto fail;
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		ret = dm_atomic_check_plane_state_fb(state, crtc);
+		if (ret)
+			goto fail;
+
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed)
 			continue;
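
The enable_vblank/disable_vblank hooks added above are invoked by the DRM core, not by the driver itself; a hypothetical caller path looks like the sketch below. Returning -EBUSY from dm_set_vblank when dc_interrupt_set() fails tells the core the vblank reference could not be taken.

	/* drm_crtc_vblank_get() may call .enable_vblank for the first user;
	 * drm_crtc_vblank_put() may call .disable_vblank once the last user leaves */
	ret = drm_crtc_vblank_get(crtc);
	if (!ret) {
		/* ... consume vblank events / read drm_crtc_vblank_count(crtc) ... */
		drm_crtc_vblank_put(crtc);
	}
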
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 9bd142f65f9b..e1acc10e35a2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 		struct cea_sad *sad = &sads[i];
 
 		edid_caps->audio_modes[i].format_code = sad->format;
-		edid_caps->audio_modes[i].channel_count = sad->channels;
+		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
 		edid_caps->audio_modes[i].sample_rate = sad->freq;
 		edid_caps->audio_modes[i].sample_size = sad->byte2;
 	}
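
The +1 matters because a CEA-861 Short Audio Descriptor stores the channel count minus one in bits 2:0 of its first byte, and the drm cea_sad structure keeps that raw field. A decode sketch (raw SAD byte assumed):

	u8 channels = (sad_byte0 & 0x07) + 1;	/* e.g. raw 0x1 -> 2 channels */
	u8 format   = (sad_byte0 >> 3) & 0x0f;	/* CEA audio format code */
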
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 1874b6cee6af..422055080df4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -683,10 +683,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
 
 void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
 {
-	if (adev->mode_info.num_crtc > 0)
-		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
-	else
-		adev->crtc_irq.num_types = 0;
+
+	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
 	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
 
 	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f3d87f418d2e..93421dad21bd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -189,6 +189,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 		.link = aconnector->dc_link,
 		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 
+	/*
+	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
+	 */
+	if (!aconnector->port || !aconnector->port->aux.ddc.algo)
+		return;
+
 	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
 	if (!edid) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 35e84ed031de..12868c769606 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1358,13 +1358,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
 }
 
-void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
 {
 
 	if (dc == NULL)
-		return;
+		return false;
 
-	dal_irq_service_set(dc->res_pool->irqs, src, enable);
+	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
 }
 
 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a37428271573..be5546181fa8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1749,8 +1749,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
1749 link->link_enc, 1749 link->link_enc,
1750 pipe_ctx->clock_source->id, 1750 pipe_ctx->clock_source->id,
1751 display_color_depth, 1751 display_color_depth,
1752 pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A, 1752 pipe_ctx->stream->signal,
1753 pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
1754 stream->phy_pix_clk); 1753 stream->phy_pix_clk);
1755 1754
1756 if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 1755 if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 95b8dd0e53c6..4d07ffebfd31 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1360,9 +1360,6 @@ bool dc_is_stream_scaling_unchanged(
1360 return true; 1360 return true;
1361} 1361}
1362 1362
1363/* Maximum TMDS single link pixel clock 165MHz */
1364#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
1365
1366static void update_stream_engine_usage( 1363static void update_stream_engine_usage(
1367 struct resource_context *res_ctx, 1364 struct resource_context *res_ctx,
1368 const struct resource_pool *pool, 1365 const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 539c3e0a6292..cd5819789d76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,8 +33,7 @@
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
-#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
-static void update_stream_signal(struct dc_stream_state *stream)
+void update_stream_signal(struct dc_stream_state *stream)
 {
 
 	struct dc_sink *dc_sink = stream->sink;
@@ -45,8 +44,9 @@ static void update_stream_signal(struct dc_stream_state *stream)
 		stream->signal = dc_sink->sink_signal;
 
 	if (dc_is_dvi_signal(stream->signal)) {
-		if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
-			stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+		if (stream->ctx->dc->caps.dual_link_dvi &&
+		    stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
+		    stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
 			stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
 		else
 			stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
@@ -193,6 +193,7 @@ bool dc_stream_set_cursor_attributes(
 
 	core_dc = stream->ctx->dc;
 	res_ctx = &core_dc->current_state->res_ctx;
+	stream->cursor_attributes = *attributes;
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -204,34 +205,8 @@ bool dc_stream_set_cursor_attributes(
 			continue;
 
 
-		if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
-			pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
-					pipe_ctx->plane_res.ipp, attributes);
-
-		if (pipe_ctx->plane_res.hubp != NULL &&
-		    pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.hubp, attributes);
-
-		if (pipe_ctx->plane_res.mi != NULL &&
-		    pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.mi, attributes);
-
-
-		if (pipe_ctx->plane_res.xfm != NULL &&
-		    pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.xfm, attributes);
-
-		if (pipe_ctx->plane_res.dpp != NULL &&
-		    pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
-			pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
-					pipe_ctx->plane_res.dpp, attributes->color_format);
+		core_dc->hwss.set_cursor_attribute(pipe_ctx);
 	}
-
-	stream->cursor_attributes = *attributes;
-
 	return true;
 }
 
@@ -255,21 +230,10 @@ bool dc_stream_set_cursor_position(
 
 	core_dc = stream->ctx->dc;
 	res_ctx = &core_dc->current_state->res_ctx;
+	stream->cursor_position = *position;
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-		struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
-		struct mem_input *mi = pipe_ctx->plane_res.mi;
-		struct hubp *hubp = pipe_ctx->plane_res.hubp;
-		struct dpp *dpp = pipe_ctx->plane_res.dpp;
-		struct dc_cursor_position pos_cpy = *position;
-		struct dc_cursor_mi_param param = {
-			.pixel_clk_khz = stream->timing.pix_clk_khz,
-			.ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
-			.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
-			.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
-			.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
-		};
 
 		if (pipe_ctx->stream != stream ||
 		    (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
@@ -278,33 +242,9 @@ bool dc_stream_set_cursor_position(
278 !pipe_ctx->plane_res.ipp) 242 !pipe_ctx->plane_res.ipp)
279 continue; 243 continue;
280 244
281 if (pipe_ctx->plane_state->address.type 245 core_dc->hwss.set_cursor_position(pipe_ctx);
282 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
283 pos_cpy.enable = false;
284
285 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
286 pos_cpy.enable = false;
287
288
289 if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
290 ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
291
292 if (mi != NULL && mi->funcs->set_cursor_position != NULL)
293 mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
294
295 if (!hubp)
296 continue;
297
298 if (hubp->funcs->set_cursor_position != NULL)
299 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
300
301 if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
302 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
303
304 } 246 }
305 247
306 stream->cursor_position = *position;
307
308 return true; 248 return true;
309} 249}
310 250
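
Both hunks above apply the same refactor: dc_stream_set_cursor_attributes() and dc_stream_set_cursor_position() first cache the requested state on the stream, then hand the per-block register programming to an ASIC-specific hw_sequencer hook instead of poking IPP/MI/HUBP/DPP inline. A minimal sketch of that dispatch shape, using simplified stand-in types rather than the real dc structures:

#include <stdbool.h>

#define MAX_PIPES 6

struct pipe_ctx { const void *stream; };

struct hw_sequencer {
	/* per-ASIC hook, e.g. dce110_set_cursor_position() below */
	void (*set_cursor_position)(struct pipe_ctx *pipe_ctx);
};

struct stream { int cursor_x, cursor_y; /* cached before programming */ };

static bool stream_set_cursor_position(struct stream *stream,
				       const struct hw_sequencer *hwss,
				       struct pipe_ctx *pipes, int x, int y)
{
	int i;

	/* cache first, so a later full pipe update can restore it */
	stream->cursor_x = x;
	stream->cursor_y = y;

	for (i = 0; i < MAX_PIPES; i++)
		if (pipes[i].stream == stream)
			hwss->set_cursor_position(&pipes[i]);

	return true;
}

Caching before the hooks run is what lets update_dchubp_dpp() further down re-apply the cursor on a full pipe update without a fresh call from userspace.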
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index e2e3c9df79ea..d6d56611604e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -62,6 +62,7 @@ struct dc_caps {
62 bool dcc_const_color; 62 bool dcc_const_color;
63 bool dynamic_audio; 63 bool dynamic_audio;
64 bool is_apu; 64 bool is_apu;
65 bool dual_link_dvi;
65}; 66};
66 67
67struct dc_dcc_surface_param { 68struct dc_dcc_surface_param {
@@ -672,7 +673,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
672 struct dc *dc, 673 struct dc *dc,
673 uint32_t src_id, 674 uint32_t src_id,
674 uint32_t ext_id); 675 uint32_t ext_id);
675void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable); 676bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
676void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src); 677void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
677enum dc_irq_source dc_get_hpd_irq_source_at_index( 678enum dc_irq_source dc_get_hpd_irq_source_at_index(
678 struct dc *dc, uint32_t link_index); 679 struct dc *dc, uint32_t link_index);
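
The prototype change from void to bool lets dc_interrupt_set() report whether the requested IRQ source could actually be (dis)armed. A hypothetical caller checking that result, with a stand-in body since the real implementation forwards to the irq service:

#include <stdbool.h>
#include <stdio.h>

enum dc_irq_source { DC_IRQ_SOURCE_VBLANK1 };

/* stand-in for the real dc_interrupt_set() */
static bool dc_interrupt_set(void *dc, enum dc_irq_source src, bool enable)
{
	(void)dc; (void)src; (void)enable;
	return true;	/* pretend the source was armed successfully */
}

int main(void)
{
	if (!dc_interrupt_set(NULL, DC_IRQ_SOURCE_VBLANK1, true))
		fprintf(stderr, "failed to enable vblank interrupt\n");
	return 0;
}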
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 01c60f11b2bd..456e4d29eadd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -237,6 +237,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
237 */ 237 */
238struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); 238struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
239 239
240void update_stream_signal(struct dc_stream_state *stream);
241
240void dc_stream_retain(struct dc_stream_state *dc_stream); 242void dc_stream_retain(struct dc_stream_state *dc_stream);
241void dc_stream_release(struct dc_stream_state *dc_stream); 243void dc_stream_release(struct dc_stream_state *dc_stream);
242 244
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index b73db9e78437..f11f17fe08f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -236,6 +236,7 @@
236 SR(D2VGA_CONTROL), \ 236 SR(D2VGA_CONTROL), \
237 SR(D3VGA_CONTROL), \ 237 SR(D3VGA_CONTROL), \
238 SR(D4VGA_CONTROL), \ 238 SR(D4VGA_CONTROL), \
239 SR(VGA_TEST_CONTROL), \
239 SR(DC_IP_REQUEST_CNTL), \ 240 SR(DC_IP_REQUEST_CNTL), \
240 BL_REG_LIST() 241 BL_REG_LIST()
241 242
@@ -337,6 +338,7 @@ struct dce_hwseq_registers {
337 uint32_t D2VGA_CONTROL; 338 uint32_t D2VGA_CONTROL;
338 uint32_t D3VGA_CONTROL; 339 uint32_t D3VGA_CONTROL;
339 uint32_t D4VGA_CONTROL; 340 uint32_t D4VGA_CONTROL;
341 uint32_t VGA_TEST_CONTROL;
340 /* MMHUB registers. read only. temporary hack */ 342 /* MMHUB registers. read only. temporary hack */
341 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; 343 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
342 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; 344 uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
@@ -493,6 +495,12 @@ struct dce_hwseq_registers {
493 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \ 495 HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
494 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \ 496 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
495 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ 497 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
498 HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
499 HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
500 HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
501 HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
502 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
503 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
496 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ 504 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
497 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) 505 HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
498 506
@@ -583,7 +591,13 @@ struct dce_hwseq_registers {
583 type DCFCLK_GATE_DIS; \ 591 type DCFCLK_GATE_DIS; \
584 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ 592 type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
585 type DENTIST_DPPCLK_WDIVIDER; \ 593 type DENTIST_DPPCLK_WDIVIDER; \
586 type DENTIST_DISPCLK_WDIVIDER; 594 type DENTIST_DISPCLK_WDIVIDER; \
595 type VGA_TEST_ENABLE; \
596 type VGA_TEST_RENDER_START; \
597 type D1VGA_MODE_ENABLE; \
598 type D2VGA_MODE_ENABLE; \
599 type D3VGA_MODE_ENABLE; \
600 type D4VGA_MODE_ENABLE;
587 601
588struct dce_hwseq_shift { 602struct dce_hwseq_shift {
589 HWSEQ_REG_FIELD_LIST(uint8_t) 603 HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index a266e3f5e75f..e4741f1a2b01 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -82,13 +82,6 @@
82#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20 82#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
83#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40 83#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
84 84
85/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
86#define TMDS_MIN_PIXEL_CLOCK 25000
87/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
88#define TMDS_MAX_PIXEL_CLOCK 165000
89/* For current ASICs pixel clock - 600MHz */
90#define MAX_ENCODER_CLOCK 600000
91
92enum { 85enum {
93 DP_MST_UPDATE_MAX_RETRY = 50 86 DP_MST_UPDATE_MAX_RETRY = 50
94}; 87};
@@ -683,6 +676,7 @@ void dce110_link_encoder_construct(
683{ 676{
684 struct bp_encoder_cap_info bp_cap_info = {0}; 677 struct bp_encoder_cap_info bp_cap_info = {0};
685 const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; 678 const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
679 enum bp_result result = BP_RESULT_OK;
686 680
687 enc110->base.funcs = &dce110_lnk_enc_funcs; 681 enc110->base.funcs = &dce110_lnk_enc_funcs;
688 enc110->base.ctx = init_data->ctx; 682 enc110->base.ctx = init_data->ctx;
@@ -757,15 +751,24 @@ void dce110_link_encoder_construct(
757 enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; 751 enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
758 } 752 }
759 753
754 /* default to one to mirror Windows behavior */
755 enc110->base.features.flags.bits.HDMI_6GB_EN = 1;
756
757 result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
758 enc110->base.id, &bp_cap_info);
759
760 /* Override features with DCE-specific values */ 760 /* Override features with DCE-specific values */
761 if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info( 761 if (BP_RESULT_OK == result) {
762 enc110->base.ctx->dc_bios, enc110->base.id,
763 &bp_cap_info)) {
764 enc110->base.features.flags.bits.IS_HBR2_CAPABLE = 762 enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
765 bp_cap_info.DP_HBR2_EN; 763 bp_cap_info.DP_HBR2_EN;
766 enc110->base.features.flags.bits.IS_HBR3_CAPABLE = 764 enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
767 bp_cap_info.DP_HBR3_EN; 765 bp_cap_info.DP_HBR3_EN;
768 enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; 766 enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
767 } else {
768 dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
769 "%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
770 __func__,
771 result);
769 } 772 }
770} 773}
771 774
@@ -904,8 +907,7 @@ void dce110_link_encoder_enable_tmds_output(
904 struct link_encoder *enc, 907 struct link_encoder *enc,
905 enum clock_source_id clock_source, 908 enum clock_source_id clock_source,
906 enum dc_color_depth color_depth, 909 enum dc_color_depth color_depth,
907 bool hdmi, 910 enum signal_type signal,
908 bool dual_link,
909 uint32_t pixel_clock) 911 uint32_t pixel_clock)
910{ 912{
911 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); 913 struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
@@ -919,16 +921,12 @@ void dce110_link_encoder_enable_tmds_output(
919 cntl.engine_id = enc->preferred_engine; 921 cntl.engine_id = enc->preferred_engine;
920 cntl.transmitter = enc110->base.transmitter; 922 cntl.transmitter = enc110->base.transmitter;
921 cntl.pll_id = clock_source; 923 cntl.pll_id = clock_source;
922 if (hdmi) { 924 cntl.signal = signal;
923 cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A; 925 if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
924 cntl.lanes_number = 4;
925 } else if (dual_link) {
926 cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
927 cntl.lanes_number = 8; 926 cntl.lanes_number = 8;
928 } else { 927 else
929 cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
930 cntl.lanes_number = 4; 928 cntl.lanes_number = 4;
931 } 929
932 cntl.hpd_sel = enc110->base.hpd_source; 930 cntl.hpd_sel = enc110->base.hpd_source;
933 931
934 cntl.pixel_clock = pixel_clock; 932 cntl.pixel_clock = pixel_clock;
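
Folding the bool hdmi / bool dual_link pair into a single enum signal_type removes an easy-to-misorder call signature and lets the lane count fall out of the signal directly. A compilable model of the new selection:

#include <stdint.h>

enum signal_type {
	SIGNAL_TYPE_DVI_SINGLE_LINK,
	SIGNAL_TYPE_DVI_DUAL_LINK,
	SIGNAL_TYPE_HDMI_TYPE_A,
};

/* dual-link DVI drives 8 lanes; single-link DVI and HDMI drive 4 */
static uint32_t lanes_for_signal(enum signal_type signal)
{
	return signal == SIGNAL_TYPE_DVI_DUAL_LINK ? 8 : 4;
}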
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 8ca9afe47a2b..0ec3433d34b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output(
210 struct link_encoder *enc, 210 struct link_encoder *enc,
211 enum clock_source_id clock_source, 211 enum clock_source_id clock_source,
212 enum dc_color_depth color_depth, 212 enum dc_color_depth color_depth,
213 bool hdmi, 213 enum signal_type signal,
214 bool dual_link,
215 uint32_t pixel_clock); 214 uint32_t pixel_clock);
216 215
217/* enables DP PHY output */ 216/* enables DP PHY output */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index 3931412ab6d3..87093894ea9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -128,23 +128,22 @@ static void set_truncation(
128 return; 128 return;
129 } 129 }
130 /* other formats: to do */ 130 /* other formats: to do */
131 if (params->flags.TRUNCATE_ENABLED == 0 || 131 if (params->flags.TRUNCATE_ENABLED == 0)
132 params->flags.TRUNCATE_DEPTH == 2)
133 return; 132 return;
134 /*Set truncation depth and Enable truncation*/ 133 /*Set truncation depth and Enable truncation*/
135 REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, 134 REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
136 FMT_TRUNCATE_EN, 1, 135 FMT_TRUNCATE_EN, 1,
137 FMT_TRUNCATE_DEPTH, 136 FMT_TRUNCATE_DEPTH,
138 params->flags.TRUNCATE_MODE, 137 params->flags.TRUNCATE_DEPTH,
139 FMT_TRUNCATE_MODE, 138 FMT_TRUNCATE_MODE,
140 params->flags.TRUNCATE_DEPTH); 139 params->flags.TRUNCATE_MODE);
141} 140}
142 141
143 142
144/** 143/**
145 * set_spatial_dither 144 * set_spatial_dither
146 * 1) set spatial dithering mode: pattern of seed 145 * 1) set spatial dithering mode: pattern of seed
147 * 2) set spatical dithering depth: 0 for 18bpp or 1 for 24bpp 146 * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
148 * 3) set random seed 147 * 3) set random seed
149 * 4) set random mode 148 * 4) set random mode
150 * lfsr is reset every frame or not reset 149 * lfsr is reset every frame or not reset
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 3ea43e2a9450..442dd2d93618 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -852,6 +852,7 @@ static bool construct(
852 dc->caps.max_downscale_ratio = 200; 852 dc->caps.max_downscale_ratio = 200;
853 dc->caps.i2c_speed_in_khz = 40; 853 dc->caps.i2c_speed_in_khz = 40;
854 dc->caps.max_cursor_size = 128; 854 dc->caps.max_cursor_size = 128;
855 dc->caps.dual_link_dvi = true;
855 856
856 for (i = 0; i < pool->base.pipe_count; i++) { 857 for (i = 0; i < pool->base.pipe_count; i++) {
857 pool->base.timing_generators[i] = 858 pool->base.timing_generators[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 86cdd7b4811f..6f382a3ac90f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -688,15 +688,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
688 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 688 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
689 struct dc_link *link = pipe_ctx->stream->sink->link; 689 struct dc_link *link = pipe_ctx->stream->sink->link;
690 690
691 /* 1. update AVI info frame (HDMI, DP) 691
692 * we always need to update info frame
693 */
694 uint32_t active_total_with_borders; 692 uint32_t active_total_with_borders;
695 uint32_t early_control = 0; 693 uint32_t early_control = 0;
696 struct timing_generator *tg = pipe_ctx->stream_res.tg; 694 struct timing_generator *tg = pipe_ctx->stream_res.tg;
697 695
698 /* TODOFPGA may change to hwss.update_info_frame */ 696 /* For MST, multiple streams go to one link.
697 * Connect the DIG back end to the front end in enable_stream and
698 * disconnect them in disable_stream.
699 * By doing this, it stays logically clean to separate stream and link */
700 link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
701 pipe_ctx->stream_res.stream_enc->id, true);
702
703 /* update AVI info frame (HDMI, DP) */
704 /* TODO: FPGA may change to hwss.update_info_frame */
699 dce110_update_info_frame(pipe_ctx); 705 dce110_update_info_frame(pipe_ctx);
706
700 /* enable early control to avoid corruption on DP monitor*/ 707 /* enable early control to avoid corruption on DP monitor*/
701 active_total_with_borders = 708 active_total_with_borders =
702 timing->h_addressable 709 timing->h_addressable
@@ -717,12 +724,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
717 pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); 724 pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
718 } 725 }
719 726
720 /* For MST, there are multiply stream go to only one link. 727
721 * connect DIG back_end to front_end while enable_stream and 728
722 * disconnect them during disable_stream
723 * BY this, it is logic clean to separate stream and link */
724 link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
725 pipe_ctx->stream_res.stream_enc->id, true);
726 729
727} 730}
728 731
@@ -1690,9 +1693,13 @@ static void apply_min_clocks(
1690 * Check if FBC can be enabled 1693 * Check if FBC can be enabled
1691 */ 1694 */
1692static bool should_enable_fbc(struct dc *dc, 1695static bool should_enable_fbc(struct dc *dc,
1693 struct dc_state *context) 1696 struct dc_state *context,
1697 uint32_t *pipe_idx)
1694{ 1698{
1695 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; 1699 uint32_t i;
1700 struct pipe_ctx *pipe_ctx = NULL;
1701 struct resource_context *res_ctx = &context->res_ctx;
1702
1696 1703
1697 ASSERT(dc->fbc_compressor); 1704 ASSERT(dc->fbc_compressor);
1698 1705
@@ -1704,6 +1711,14 @@ static bool should_enable_fbc(struct dc *dc,
1704 if (context->stream_count != 1) 1711 if (context->stream_count != 1)
1705 return false; 1712 return false;
1706 1713
1714 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1715 if (res_ctx->pipe_ctx[i].stream) {
1716 pipe_ctx = &res_ctx->pipe_ctx[i];
1717 *pipe_idx = i;
1718 break;
1719 }
1720 }
1721
1707 /* Only supports eDP */ 1722 /* Only supports eDP */
1708 if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP) 1723 if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
1709 return false; 1724 return false;
@@ -1729,11 +1744,14 @@ static bool should_enable_fbc(struct dc *dc,
1729static void enable_fbc(struct dc *dc, 1744static void enable_fbc(struct dc *dc,
1730 struct dc_state *context) 1745 struct dc_state *context)
1731{ 1746{
1732 if (should_enable_fbc(dc, context)) { 1747 uint32_t pipe_idx = 0;
1748
1749 if (should_enable_fbc(dc, context, &pipe_idx)) {
1733 /* Program GRPH COMPRESSED ADDRESS and PITCH */ 1750 /* Program GRPH COMPRESSED ADDRESS and PITCH */
1734 struct compr_addr_and_pitch_params params = {0, 0, 0}; 1751 struct compr_addr_and_pitch_params params = {0, 0, 0};
1735 struct compressor *compr = dc->fbc_compressor; 1752 struct compressor *compr = dc->fbc_compressor;
1736 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0]; 1753 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
1754
1737 1755
1738 params.source_view_width = pipe_ctx->stream->timing.h_addressable; 1756 params.source_view_width = pipe_ctx->stream->timing.h_addressable;
1739 params.source_view_height = pipe_ctx->stream->timing.v_addressable; 1757 params.source_view_height = pipe_ctx->stream->timing.v_addressable;
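
should_enable_fbc() used to hard-code pipe 0, which breaks when the single active stream lands on another pipe; it now scans for the first pipe with a stream and returns its index through the new pipe_idx out-parameter, which enable_fbc() then uses. The search reduces to this pattern (stand-in types):

struct pipe { const void *stream; };

/* index of the first pipe with an attached stream, or -1 if none */
static int first_active_pipe(const struct pipe *pipes, int pipe_count)
{
	int i;

	for (i = 0; i < pipe_count; i++)
		if (pipes[i].stream)
			return i;
	return -1;
}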
@@ -2915,6 +2933,49 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
2915 } 2933 }
2916} 2934}
2917 2935
2936void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
2937{
2938 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2939 struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
2940 struct mem_input *mi = pipe_ctx->plane_res.mi;
2941 struct dc_cursor_mi_param param = {
2942 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
2943 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
2944 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
2945 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
2946 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
2947 };
2948
2949 if (pipe_ctx->plane_state->address.type
2950 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2951 pos_cpy.enable = false;
2952
2953 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
2954 pos_cpy.enable = false;
2955
2956 if (ipp->funcs->ipp_cursor_set_position)
2957 ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
2958 if (mi->funcs->set_cursor_position)
2959 mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
2960}
2961
2962void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2963{
2964 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
2965
2966 if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
2967 pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
2968 pipe_ctx->plane_res.ipp, attributes);
2969
2970 if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
2971 pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
2972 pipe_ctx->plane_res.mi, attributes);
2973
2974 if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
2975 pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
2976 pipe_ctx->plane_res.xfm, attributes);
2977}
2978
2918static void ready_shared_resources(struct dc *dc, struct dc_state *context) {} 2979static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
2919 2980
2920static void optimize_shared_resources(struct dc *dc) {} 2981static void optimize_shared_resources(struct dc *dc) {}
@@ -2957,6 +3018,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
2957 .edp_backlight_control = hwss_edp_backlight_control, 3018 .edp_backlight_control = hwss_edp_backlight_control,
2958 .edp_power_control = hwss_edp_power_control, 3019 .edp_power_control = hwss_edp_power_control,
2959 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, 3020 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
3021 .set_cursor_position = dce110_set_cursor_position,
3022 .set_cursor_attribute = dce110_set_cursor_attribute
2960}; 3023};
2961 3024
2962void dce110_hw_sequencer_construct(struct dc *dc) 3025void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7c4779578fb7..00f18c485e1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(
846 return result; 846 return result;
847} 847}
848 848
849enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
850 struct dc_caps *caps)
851{
852 if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
853 ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
854 return DC_FAIL_SURFACE_VALIDATE;
855
856 return DC_OK;
857}
858
849static bool dce110_validate_surface_sets( 859static bool dce110_validate_surface_sets(
850 struct dc_state *context) 860 struct dc_state *context)
851{ 861{
@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(
869 plane->src_rect.height > 1080)) 879 plane->src_rect.height > 1080))
870 return false; 880 return false;
871 881
882 /* we don't have the logic to support underlay-only
883 * yet, so block the use case where we get an
884 * NV12 plane as the top layer
885 */
886 if (j == 0)
887 return false;
888
872 /* irrespective of plane format, 889 /* irrespective of plane format,
873 * stream should be RGB encoded 890 * stream should be RGB encoded
874 */ 891 */
@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
1021 .link_enc_create = dce110_link_encoder_create, 1038 .link_enc_create = dce110_link_encoder_create,
1022 .validate_guaranteed = dce110_validate_guaranteed, 1039 .validate_guaranteed = dce110_validate_guaranteed,
1023 .validate_bandwidth = dce110_validate_bandwidth, 1040 .validate_bandwidth = dce110_validate_bandwidth,
1041 .validate_plane = dce110_validate_plane,
1024 .acquire_idle_pipe_for_layer = dce110_acquire_underlay, 1042 .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
1025 .add_stream_to_ctx = dce110_add_stream_to_ctx, 1043 .add_stream_to_ctx = dce110_add_stream_to_ctx,
1026 .validate_global = dce110_validate_global 1044 .validate_global = dce110_validate_global
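
The new dce110_validate_plane() hook enforces the 2x downscale limit up front: a surface whose destination is less than half the source in either dimension fails with DC_FAIL_SURFACE_VALIDATE before any mode programming is attempted. The predicate as a standalone model:

#include <stdbool.h>

struct rect { int width, height; };

/* true while the downscale stays within the 2x limit per axis */
static bool downscale_within_limit(struct rect src, struct rect dst)
{
	return dst.width * 2 >= src.width && dst.height * 2 >= src.height;
}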
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 663e0a047a4b..98d9cd0109e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -1103,6 +1103,8 @@ static bool construct(
1103 dc->caps.max_downscale_ratio = 200; 1103 dc->caps.max_downscale_ratio = 200;
1104 dc->caps.i2c_speed_in_khz = 100; 1104 dc->caps.i2c_speed_in_khz = 100;
1105 dc->caps.max_cursor_size = 128; 1105 dc->caps.max_cursor_size = 128;
1106 dc->caps.dual_link_dvi = true;
1107
1106 1108
1107 /************************************************* 1109 /*************************************************
1108 * Create resources * 1110 * Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 57cd67359567..5aab01db28ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -835,6 +835,8 @@ static bool construct(
835 dc->caps.max_downscale_ratio = 200; 835 dc->caps.max_downscale_ratio = 200;
836 dc->caps.i2c_speed_in_khz = 100; 836 dc->caps.i2c_speed_in_khz = 100;
837 dc->caps.max_cursor_size = 128; 837 dc->caps.max_cursor_size = 128;
838 dc->caps.dual_link_dvi = true;
839
838 dc->debug = debug_defaults; 840 dc->debug = debug_defaults;
839 841
840 /************************************************* 842 /*************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 8f2bd56f3461..25d7eb1567ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -793,6 +793,7 @@ static bool dce80_construct(
793 dc->caps.max_downscale_ratio = 200; 793 dc->caps.max_downscale_ratio = 200;
794 dc->caps.i2c_speed_in_khz = 40; 794 dc->caps.i2c_speed_in_khz = 40;
795 dc->caps.max_cursor_size = 128; 795 dc->caps.max_cursor_size = 128;
796 dc->caps.dual_link_dvi = true;
796 797
797 /************************************************* 798 /*************************************************
798 * Create resources * 799 * Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 82572863acab..dc1e010725c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -238,10 +238,34 @@ static void enable_power_gating_plane(
238static void disable_vga( 238static void disable_vga(
239 struct dce_hwseq *hws) 239 struct dce_hwseq *hws)
240{ 240{
241 unsigned int in_vga1_mode = 0;
242 unsigned int in_vga2_mode = 0;
243 unsigned int in_vga3_mode = 0;
244 unsigned int in_vga4_mode = 0;
245
246 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
247 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
248 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
249 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
250
251 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
252 in_vga3_mode == 0 && in_vga4_mode == 0)
253 return;
254
241 REG_WRITE(D1VGA_CONTROL, 0); 255 REG_WRITE(D1VGA_CONTROL, 0);
242 REG_WRITE(D2VGA_CONTROL, 0); 256 REG_WRITE(D2VGA_CONTROL, 0);
243 REG_WRITE(D3VGA_CONTROL, 0); 257 REG_WRITE(D3VGA_CONTROL, 0);
244 REG_WRITE(D4VGA_CONTROL, 0); 258 REG_WRITE(D4VGA_CONTROL, 0);
259
260 /* HW Engineer's Notes:
261 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
262 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
263 *
264 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
265 * VGA_TEST_ENABLE, to leave it in the same state as before.
266 */
267 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
268 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
245} 269}
246 270
247static void dpp_pg_control( 271static void dpp_pg_control(
@@ -1761,6 +1785,11 @@ static void update_dchubp_dpp(
1761 &pipe_ctx->plane_res.scl_data.viewport_c); 1785 &pipe_ctx->plane_res.scl_data.viewport_c);
1762 } 1786 }
1763 1787
1788 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
1789 dc->hwss.set_cursor_position(pipe_ctx);
1790 dc->hwss.set_cursor_attribute(pipe_ctx);
1791 }
1792
1764 if (plane_state->update_flags.bits.full_update) { 1793 if (plane_state->update_flags.bits.full_update) {
1765 /*gamut remap*/ 1794 /*gamut remap*/
1766 program_gamut_remap(pipe_ctx); 1795 program_gamut_remap(pipe_ctx);
@@ -2296,7 +2325,7 @@ static bool dcn10_dummy_display_power_gating(
2296 return true; 2325 return true;
2297} 2326}
2298 2327
2299void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx) 2328static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2300{ 2329{
2301 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2330 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2302 struct timing_generator *tg = pipe_ctx->stream_res.tg; 2331 struct timing_generator *tg = pipe_ctx->stream_res.tg;
@@ -2316,12 +2345,46 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2316 } 2345 }
2317} 2346}
2318 2347
2319void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) 2348static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2320{ 2349{
2321 if (hws->ctx->dc->res_pool->hubbub != NULL) 2350 if (hws->ctx->dc->res_pool->hubbub != NULL)
2322 hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data); 2351 hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
2323} 2352}
2324 2353
2354static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2355{
2356 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2357 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2358 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2359 struct dc_cursor_mi_param param = {
2360 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
2361 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
2362 .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
2363 .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
2364 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
2365 };
2366
2367 if (pipe_ctx->plane_state->address.type
2368 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2369 pos_cpy.enable = false;
2370
2371 if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
2372 pos_cpy.enable = false;
2373
2374 hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
2375 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
2376}
2377
2378static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
2379{
2380 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
2381
2382 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
2383 pipe_ctx->plane_res.hubp, attributes);
2384 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
2385 pipe_ctx->plane_res.dpp, attributes->color_format);
2386}
2387
2325static const struct hw_sequencer_funcs dcn10_funcs = { 2388static const struct hw_sequencer_funcs dcn10_funcs = {
2326 .program_gamut_remap = program_gamut_remap, 2389 .program_gamut_remap = program_gamut_remap,
2327 .program_csc_matrix = program_csc_matrix, 2390 .program_csc_matrix = program_csc_matrix,
@@ -2362,6 +2425,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
2362 .edp_backlight_control = hwss_edp_backlight_control, 2425 .edp_backlight_control = hwss_edp_backlight_control,
2363 .edp_power_control = hwss_edp_power_control, 2426 .edp_power_control = hwss_edp_power_control,
2364 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, 2427 .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
2428 .set_cursor_position = dcn10_set_cursor_position,
2429 .set_cursor_attribute = dcn10_set_cursor_attribute
2365}; 2430};
2366 2431
2367 2432
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 0fd329deacd8..54d8a1386142 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -123,8 +123,7 @@ struct link_encoder_funcs {
123 void (*enable_tmds_output)(struct link_encoder *enc, 123 void (*enable_tmds_output)(struct link_encoder *enc,
124 enum clock_source_id clock_source, 124 enum clock_source_id clock_source,
125 enum dc_color_depth color_depth, 125 enum dc_color_depth color_depth,
126 bool hdmi, 126 enum signal_type signal,
127 bool dual_link,
128 uint32_t pixel_clock); 127 uint32_t pixel_clock);
129 void (*enable_dp_output)(struct link_encoder *enc, 128 void (*enable_dp_output)(struct link_encoder *enc,
130 const struct dc_link_settings *link_settings, 129 const struct dc_link_settings *link_settings,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 4c0aa56f7bae..379c6ecd271a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -198,6 +198,9 @@ struct hw_sequencer_funcs {
198 bool enable); 198 bool enable);
199 void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up); 199 void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
200 200
201 void (*set_cursor_position)(struct pipe_ctx *pipe);
202 void (*set_cursor_attribute)(struct pipe_ctx *pipe);
203
201}; 204};
202 205
203void color_space_to_black_color( 206void color_space_to_black_color(
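
These two members are the seam the cursor refactor hangs on: dc_stream_set_cursor_position() and dc_stream_set_cursor_attributes() call through them, and each ASIC family fills them in its hw_sequencer_funcs table (dce110_funcs and dcn10_funcs above). A trimmed sketch of how such a table wires in the hooks; the my_asic_* names are hypothetical:

struct pipe_ctx;	/* opaque here; defined by the resource code */

struct hw_sequencer_funcs {
	void (*set_cursor_position)(struct pipe_ctx *pipe);
	void (*set_cursor_attribute)(struct pipe_ctx *pipe);
	/* ...the many other hooks elided... */
};

static void my_asic_set_cursor_position(struct pipe_ctx *pipe) { (void)pipe; }
static void my_asic_set_cursor_attribute(struct pipe_ctx *pipe) { (void)pipe; }

static const struct hw_sequencer_funcs my_asic_funcs = {
	.set_cursor_position	= my_asic_set_cursor_position,
	.set_cursor_attribute	= my_asic_set_cursor_attribute,
};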
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index f7e40b292dfb..d3e1923b01a8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -217,7 +217,7 @@ bool dce110_vblank_set(
217 core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg; 217 core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
218 218
219 if (enable) { 219 if (enable) {
220 if (!tg->funcs->arm_vert_intr(tg, 2)) { 220 if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
221 DC_ERROR("Failed to get VBLANK!\n"); 221 DC_ERROR("Failed to get VBLANK!\n");
222 return false; 222 return false;
223 } 223 }
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 57a54a7b89e5..1c079ba37c30 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -42,8 +42,7 @@ static void virtual_link_encoder_enable_tmds_output(
42 struct link_encoder *enc, 42 struct link_encoder *enc,
43 enum clock_source_id clock_source, 43 enum clock_source_id clock_source,
44 enum dc_color_depth color_depth, 44 enum dc_color_depth color_depth,
45 bool hdmi, 45 enum signal_type signal,
46 bool dual_link,
47 uint32_t pixel_clock) {} 46 uint32_t pixel_clock) {}
48 47
49static void virtual_link_encoder_enable_dp_output( 48static void virtual_link_encoder_enable_dp_output(
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 7a9b43f84a31..36bbad594267 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -419,11 +419,6 @@ struct bios_event_info {
419 bool backlight_changed; 419 bool backlight_changed;
420}; 420};
421 421
422enum {
423 HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
424 TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
425};
426
427/* 422/*
428 * DFS-bypass flag 423 * DFS-bypass flag
429 */ 424 */
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index b5ebde642207..199c5db67cbc 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -26,6 +26,11 @@
26#ifndef __DC_SIGNAL_TYPES_H__ 26#ifndef __DC_SIGNAL_TYPES_H__
27#define __DC_SIGNAL_TYPES_H__ 27#define __DC_SIGNAL_TYPES_H__
28 28
29/* Minimum pixel clock, in KHz. For a TMDS signal this is 25.00 MHz */
30#define TMDS_MIN_PIXEL_CLOCK 25000
31/* Maximum pixel clock, in KHz. For a TMDS signal this is 165.00 MHz */
32#define TMDS_MAX_PIXEL_CLOCK 165000
33
29enum signal_type { 34enum signal_type {
30 SIGNAL_TYPE_NONE = 0L, /* no signal */ 35 SIGNAL_TYPE_NONE = 0L, /* no signal */
31 SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0), 36 SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
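
Hoisting TMDS_MIN_PIXEL_CLOCK and TMDS_MAX_PIXEL_CLOCK from the DCE link encoder into signal_types.h makes the limits available to any code that classifies signals. A hypothetical range check built on them:

#include <stdbool.h>

#define TMDS_MIN_PIXEL_CLOCK 25000	/* kHz */
#define TMDS_MAX_PIXEL_CLOCK 165000	/* kHz */

/* hypothetical helper: can a single TMDS link carry this pixel clock? */
static bool tmds_pixel_clock_in_range(int pix_clk_khz)
{
	return pix_clk_khz >= TMDS_MIN_PIXEL_CLOCK &&
	       pix_clk_khz <= TMDS_MAX_PIXEL_CLOCK;
}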
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 5f4c2e833a65..d665dd5af5dd 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = {
97 {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ 97 {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
98 {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ 98 {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
99 {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ 99 {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
100 {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ 100 {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
101 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ 101 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
102 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ 102 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
103 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ 103 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
@@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
127 {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ 127 {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
128 {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ 128 {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
129 {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ 129 {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
130 {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ 130 {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
131 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ 131 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
132 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ 132 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
133 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ 133 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index c0530a1af5e3..2dc5e8bed172 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -461,6 +461,12 @@ int drm_mode_getfb(struct drm_device *dev,
461 if (!fb) 461 if (!fb)
462 return -ENOENT; 462 return -ENOENT;
463 463
464 /* Multi-planar framebuffers need getfb2. */
465 if (fb->format->num_planes > 1) {
466 ret = -EINVAL;
467 goto out;
468 }
469
464 r->height = fb->height; 470 r->height = fb->height;
465 r->width = fb->width; 471 r->width = fb->width;
466 r->depth = fb->format->depth; 472 r->depth = fb->format->depth;
@@ -484,6 +490,7 @@ int drm_mode_getfb(struct drm_device *dev,
484 ret = -ENODEV; 490 ret = -ENODEV;
485 } 491 }
486 492
493out:
487 drm_framebuffer_put(fb); 494 drm_framebuffer_put(fb);
488 495
489 return ret; 496 return ret;
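
The legacy getfb ioctl can describe only one handle/pitch/offset triple, so it cannot faithfully report a multi-planar framebuffer; the new guard rejects such buffers with -EINVAL and steers userspace to the getfb2 interface. Modeled standalone:

#include <errno.h>

struct framebuffer { int num_planes; };

/* legacy single-plane query: refuse anything it cannot describe */
static int legacy_getfb_check(const struct framebuffer *fb)
{
	return fb->num_planes > 1 ? -EINVAL : 0;
}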
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index c8454ac43fae..db6b94dda5df 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -471,6 +471,7 @@ struct parser_exec_state {
471 * used when ret from 2nd level batch buffer 471 * used when ret from 2nd level batch buffer
472 */ 472 */
473 int saved_buf_addr_type; 473 int saved_buf_addr_type;
474 bool is_ctx_wa;
474 475
475 struct cmd_info *info; 476 struct cmd_info *info;
476 477
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s)
1715 bb->accessing = true; 1716 bb->accessing = true;
1716 bb->bb_start_cmd_va = s->ip_va; 1717 bb->bb_start_cmd_va = s->ip_va;
1717 1718
1719 if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
1720 bb->bb_offset = s->ip_va - s->rb_va;
1721 else
1722 bb->bb_offset = 0;
1723
1718 /* 1724 /*
1719 * ip_va saves the virtual address of the shadow batch buffer, while 1725 * ip_va saves the virtual address of the shadow batch buffer, while
1720 * ip_gma saves the graphics address of the original batch buffer. 1726 * ip_gma saves the graphics address of the original batch buffer.
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
2571 s.ring_tail = gma_tail; 2577 s.ring_tail = gma_tail;
2572 s.rb_va = workload->shadow_ring_buffer_va; 2578 s.rb_va = workload->shadow_ring_buffer_va;
2573 s.workload = workload; 2579 s.workload = workload;
2580 s.is_ctx_wa = false;
2574 2581
2575 if ((bypass_scan_mask & (1 << workload->ring_id)) || 2582 if ((bypass_scan_mask & (1 << workload->ring_id)) ||
2576 gma_head == gma_tail) 2583 gma_head == gma_tail)
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2624 s.ring_tail = gma_tail; 2631 s.ring_tail = gma_tail;
2625 s.rb_va = wa_ctx->indirect_ctx.shadow_va; 2632 s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2626 s.workload = workload; 2633 s.workload = workload;
2634 s.is_ctx_wa = true;
2627 2635
2628 if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { 2636 if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2629 ret = -EINVAL; 2637 ret = -EINVAL;
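
perform_bb_shadow() now records, for ring batch buffers only (not indirect-context workarounds, hence is_ctx_wa), where the MI_BATCH_BUFFER_START command sits relative to the scanned copy of the ring; the scheduler later re-bases bb_start_cmd_va against the real ring buffer once that address exists. The arithmetic as a toy model:

#include <stdint.h>

/* remember the command's offset in the scan copy, then re-derive its
 * address once the real ring buffer VA is known */
static uint32_t *rebase_bb_start(char *scan_ring_va, uint32_t *cmd_va,
				 char *real_ring_va)
{
	unsigned long bb_offset = (char *)cmd_va - scan_ring_va;

	return (uint32_t *)(real_ring_va + bb_offset);
}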
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 256f1bb522b7..152df3d0291e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -394,9 +394,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
394 * performance for batch mmio read/write, so we need to 394 * performance for batch mmio read/write, so we need to
395 * handle forcewake manually. 395 * handle forcewake manually.
396 */ 396 */
397 intel_runtime_pm_get(dev_priv);
397 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 398 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
398 switch_mmio(pre, next, ring_id); 399 switch_mmio(pre, next, ring_id);
399 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 400 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
401 intel_runtime_pm_put(dev_priv);
400} 402}
401 403
402/** 404/**
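
intel_uncore_forcewake_get() performs register accesses of its own, so the device must be runtime-resumed before it is called; the added intel_runtime_pm_get()/intel_runtime_pm_put() pair brackets the whole MMIO burst, with references released in reverse order of acquisition. The shape of the fix, as a kernel-style fragment using the real i915 APIs:

intel_runtime_pm_get(dev_priv);		/* device awake for the MMIO burst */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv);		/* release in reverse order */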
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b55b3580ca1d..d74d6f05c62c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer(
52 pdp_pair[i].val = pdp[7 - i]; 52 pdp_pair[i].val = pdp[7 - i];
53} 53}
54 54
55/*
56 * When populating the shadow ctx from the guest, we should not override
57 * OA-related registers, so that they are not clobbered by guest OA configs.
58 * This makes it possible to capture OA data from the host for both host and guests.
59 */
60static void sr_oa_regs(struct intel_vgpu_workload *workload,
61 u32 *reg_state, bool save)
62{
63 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
64 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
65 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
66 int i = 0;
67 u32 flex_mmio[] = {
68 i915_mmio_reg_offset(EU_PERF_CNTL0),
69 i915_mmio_reg_offset(EU_PERF_CNTL1),
70 i915_mmio_reg_offset(EU_PERF_CNTL2),
71 i915_mmio_reg_offset(EU_PERF_CNTL3),
72 i915_mmio_reg_offset(EU_PERF_CNTL4),
73 i915_mmio_reg_offset(EU_PERF_CNTL5),
74 i915_mmio_reg_offset(EU_PERF_CNTL6),
75 };
76
77 if (!workload || !reg_state || workload->ring_id != RCS)
78 return;
79
80 if (save) {
81 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
82
83 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
84 u32 state_offset = ctx_flexeu0 + i * 2;
85
86 workload->flex_mmio[i] = reg_state[state_offset + 1];
87 }
88 } else {
89 reg_state[ctx_oactxctrl] =
90 i915_mmio_reg_offset(GEN8_OACTXCONTROL);
91 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
92
93 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
94 u32 state_offset = ctx_flexeu0 + i * 2;
95 u32 mmio = flex_mmio[i];
96
97 reg_state[state_offset] = mmio;
98 reg_state[state_offset + 1] = workload->flex_mmio[i];
99 }
100 }
101}
102
55static int populate_shadow_context(struct intel_vgpu_workload *workload) 103static int populate_shadow_context(struct intel_vgpu_workload *workload)
56{ 104{
57 struct intel_vgpu *vgpu = workload->vgpu; 105 struct intel_vgpu *vgpu = workload->vgpu;
@@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
98 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); 146 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
99 shadow_ring_context = kmap(page); 147 shadow_ring_context = kmap(page);
100 148
149 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
101#define COPY_REG(name) \ 150#define COPY_REG(name) \
102 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ 151 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
103 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) 152 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
@@ -122,6 +171,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
122 sizeof(*shadow_ring_context), 171 sizeof(*shadow_ring_context),
123 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); 172 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
124 173
174 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
125 kunmap(page); 175 kunmap(page);
126 return 0; 176 return 0;
127} 177}
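
Because sr_oa_regs() runs once with save=true before the guest context is copied in and once with save=false after, the host's OACTXCONTROL and EU_PERF_CNTL0-6 flex-EU values survive the copy and guest OA settings never reach the shadow image. The bracket reduces to this shape (stand-in sizes and indices):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define N_OA_REGS 8

/* copy guest state into the shadow image while preserving the host's
 * OA register slots across the copy */
static void populate_shadow(uint32_t *shadow, const uint32_t *guest,
			    size_t n, const size_t oa_idx[N_OA_REGS])
{
	uint32_t saved[N_OA_REGS];
	int i;

	for (i = 0; i < N_OA_REGS; i++)		/* save = true */
		saved[i] = shadow[oa_idx[i]];

	memcpy(shadow, guest, n * sizeof(*shadow));

	for (i = 0; i < N_OA_REGS; i++)		/* save = false */
		shadow[oa_idx[i]] = saved[i];
}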
@@ -376,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
376 goto err; 426 goto err;
377 } 427 }
378 428
429 /* For a privileged batch buffer that is not a wa_ctx, bb_start_cmd_va
430 * is only updated into ring_scan_buffer, not the real ring address
431 * allocated in the later copy_workload_to_ring_buffer. Please note that
432 * shadow_ring_buffer_va points at the real ring buffer va
433 * in copy_workload_to_ring_buffer.
434 */
435
436 if (bb->bb_offset)
437 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
438 + bb->bb_offset;
439
379 /* relocate shadow batch buffer */ 440 /* relocate shadow batch buffer */
380 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma); 441 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
381 if (gmadr_bytes == 8) 442 if (gmadr_bytes == 8)
@@ -1044,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1044 1105
1045 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); 1106 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
1046 1107
1047 s->workloads = kmem_cache_create("gvt-g_vgpu_workload", 1108 s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
1048 sizeof(struct intel_vgpu_workload), 0, 1109 sizeof(struct intel_vgpu_workload), 0,
1049 SLAB_HWCACHE_ALIGN, 1110 SLAB_HWCACHE_ALIGN,
1050 NULL); 1111 offsetof(struct intel_vgpu_workload, rb_tail),
1112 sizeof_field(struct intel_vgpu_workload, rb_tail),
1113 NULL);
1051 1114
1052 if (!s->workloads) { 1115 if (!s->workloads) {
1053 ret = -ENOMEM; 1116 ret = -ENOMEM;
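
With CONFIG_HARDENED_USERCOPY, copies between userspace and a slab object must stay inside an explicitly whitelisted region, and kmem_cache_create_usercopy() is how a cache declares one: the extra useroffset/usersize pair here restricts user copies of a workload object to its rb_tail field. The call shape, sketched against a hypothetical object:

struct my_obj {
	unsigned long priv;		/* never copied to/from userspace */
	unsigned int rb_tail;		/* the one whitelisted field */
};

struct kmem_cache *cache;

cache = kmem_cache_create_usercopy("my_obj_cache",
		sizeof(struct my_obj), 0,
		SLAB_HWCACHE_ALIGN,
		offsetof(struct my_obj, rb_tail),	/* useroffset */
		sizeof_field(struct my_obj, rb_tail),	/* usersize   */
		NULL);					/* no ctor    */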
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ff175a98b19e..a79a4f60637e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -110,6 +110,10 @@ struct intel_vgpu_workload {
110 /* shadow batch buffer */ 110 /* shadow batch buffer */
111 struct list_head shadow_bb; 111 struct list_head shadow_bb;
112 struct intel_shadow_wa_ctx wa_ctx; 112 struct intel_shadow_wa_ctx wa_ctx;
113
114 /* oa registers */
115 u32 oactxctrl;
116 u32 flex_mmio[7];
113}; 117};
114 118
115struct intel_vgpu_shadow_bb { 119struct intel_vgpu_shadow_bb {
@@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb {
120 u32 *bb_start_cmd_va; 124 u32 *bb_start_cmd_va;
121 unsigned int clflush; 125 unsigned int clflush;
122 bool accessing; 126 bool accessing;
127 unsigned long bb_offset;
123}; 128};
124 129
125#define workload_q_head(vgpu, ring_id) \ 130#define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dd89abd2263d..6ff5d655c202 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -434,20 +434,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
434 dma_fence_put(shared[i]); 434 dma_fence_put(shared[i]);
435 kfree(shared); 435 kfree(shared);
436 436
437 /*
438 * If both shared fences and an exclusive fence exist,
439 * then by construction the shared fences must be later
440 * than the exclusive fence. If we successfully wait for
441 * all the shared fences, we know that the exclusive fence
442 * must also be signaled. If all the shared fences are
443 * signaled, we can prune the array and recover the
444 * floating references on the fences/requests.
445 */
437 prune_fences = count && timeout >= 0; 446 prune_fences = count && timeout >= 0;
438 } else { 447 } else {
439 excl = reservation_object_get_excl_rcu(resv); 448 excl = reservation_object_get_excl_rcu(resv);
440 } 449 }
441 450
442 if (excl && timeout >= 0) { 451 if (excl && timeout >= 0)
443 timeout = i915_gem_object_wait_fence(excl, flags, timeout, 452 timeout = i915_gem_object_wait_fence(excl, flags, timeout,
444 rps_client); 453 rps_client);
445 prune_fences = timeout >= 0;
446 }
447 454
448 dma_fence_put(excl); 455 dma_fence_put(excl);
449 456
450 /* Oportunistically prune the fences iff we know they have *all* been 457 /*
458 * Opportunistically prune the fences iff we know they have *all* been
451 * signaled and that the reservation object has not been changed (i.e. 459 * signaled and that the reservation object has not been changed (i.e.
452 * no new fences have been added). 460 * no new fences have been added).
453 */ 461 */
@@ -3205,8 +3213,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
3205 * rolling the global seqno forward (since this would complete requests 3213 * rolling the global seqno forward (since this would complete requests
3206 * for which we haven't set the fence error to EIO yet). 3214 * for which we haven't set the fence error to EIO yet).
3207 */ 3215 */
3208 for_each_engine(engine, i915, id) 3216 for_each_engine(engine, i915, id) {
3217 i915_gem_reset_prepare_engine(engine);
3209 engine->submit_request = nop_submit_request; 3218 engine->submit_request = nop_submit_request;
3219 }
3210 3220
3211 /* 3221 /*
3212 * Make sure no one is running the old callback before we proceed with 3222 * Make sure no one is running the old callback before we proceed with
@@ -3244,6 +3254,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
3244 intel_engine_init_global_seqno(engine, 3254 intel_engine_init_global_seqno(engine,
3245 intel_engine_last_submit(engine)); 3255 intel_engine_last_submit(engine));
3246 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3256 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3257
3258 i915_gem_reset_finish_engine(engine);
3247 } 3259 }
3248 3260
3249 set_bit(I915_WEDGED, &i915->gpu_error.flags); 3261 set_bit(I915_WEDGED, &i915->gpu_error.flags);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0be50e43507d..f8fe5ffcdcff 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
1303 */ 1303 */
1304 mutex_lock(&dev_priv->drm.struct_mutex); 1304 mutex_lock(&dev_priv->drm.struct_mutex);
1305 dev_priv->perf.oa.exclusive_stream = NULL; 1305 dev_priv->perf.oa.exclusive_stream = NULL;
1306 mutex_unlock(&dev_priv->drm.struct_mutex);
1307
1308 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 1306 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
1307 mutex_unlock(&dev_priv->drm.struct_mutex);
1309 1308
1310 free_oa_buffer(dev_priv); 1309 free_oa_buffer(dev_priv);
1311 1310
@@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
1756 * Note: it's only the RCS/Render context that has any OA state. 1755 * Note: it's only the RCS/Render context that has any OA state.
1757 */ 1756 */
1758static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, 1757static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1759 const struct i915_oa_config *oa_config, 1758 const struct i915_oa_config *oa_config)
1760 bool interruptible)
1761{ 1759{
1762 struct i915_gem_context *ctx; 1760 struct i915_gem_context *ctx;
1763 int ret; 1761 int ret;
1764 unsigned int wait_flags = I915_WAIT_LOCKED; 1762 unsigned int wait_flags = I915_WAIT_LOCKED;
1765 1763
1766 if (interruptible) { 1764 lockdep_assert_held(&dev_priv->drm.struct_mutex);
1767 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
1768 if (ret)
1769 return ret;
1770
1771 wait_flags |= I915_WAIT_INTERRUPTIBLE;
1772 } else {
1773 mutex_lock(&dev_priv->drm.struct_mutex);
1774 }
1775 1765
1776 /* Switch away from any user context. */ 1766 /* Switch away from any user context. */
1777 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config); 1767 ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
@@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
1819 } 1809 }
1820 1810
1821 out: 1811 out:
1822 mutex_unlock(&dev_priv->drm.struct_mutex);
1823
1824 return ret; 1812 return ret;
1825} 1813}
1826 1814
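
The rework inverts the locking: gen8_configure_all_contexts() no longer takes struct_mutex itself (the dropped bool chose between interruptible and plain locking), the caller holds it across enable_metric_set() instead, and the callee merely documents the requirement. lockdep_assert_held() is the standard way to encode such a contract so violations trip a warning on lockdep-enabled kernels:

/* kernel-style fragment: the callee asserts what its caller must hold */
static int configure_all_contexts(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* ...reconfigure contexts; no locking of its own... */
	return 0;
}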
@@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1863 * to make sure all slices/subslices are ON before writing to NOA 1851 * to make sure all slices/subslices are ON before writing to NOA
1864 * registers. 1852 * registers.
1865 */ 1853 */
1866 ret = gen8_configure_all_contexts(dev_priv, oa_config, true); 1854 ret = gen8_configure_all_contexts(dev_priv, oa_config);
1867 if (ret) 1855 if (ret)
1868 return ret; 1856 return ret;
1869 1857
@@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
1878static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) 1866static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1879{ 1867{
1880 /* Reset all contexts' slices/subslices configurations. */ 1868 /* Reset all contexts' slices/subslices configurations. */
1881 gen8_configure_all_contexts(dev_priv, NULL, false); 1869 gen8_configure_all_contexts(dev_priv, NULL);
1882 1870
1883 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & 1871 I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
1884 ~GT_NOA_ENABLE)); 1872 ~GT_NOA_ENABLE));
@@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
1888static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) 1876static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
1889{ 1877{
1890 /* Reset all contexts' slices/subslices configurations. */ 1878 /* Reset all contexts' slices/subslices configurations. */
1891 gen8_configure_all_contexts(dev_priv, NULL, false); 1879 gen8_configure_all_contexts(dev_priv, NULL);
1892 1880
1893 /* Make sure we disable noa to save power. */ 1881 /* Make sure we disable noa to save power. */
1894 I915_WRITE(RPM_CONFIG1, 1882 I915_WRITE(RPM_CONFIG1,
@@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
2138 if (ret) 2126 if (ret)
2139 goto err_oa_buf_alloc; 2127 goto err_oa_buf_alloc;
2140 2128
2129 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2130 if (ret)
2131 goto err_lock;
2132
2141 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, 2133 ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
2142 stream->oa_config); 2134 stream->oa_config);
2143 if (ret) 2135 if (ret)
@@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
2145 2137
2146 stream->ops = &i915_oa_stream_ops; 2138 stream->ops = &i915_oa_stream_ops;
2147 2139
2148 /* Lock device for exclusive_stream access late because
2149 * enable_metric_set() might lock as well on gen8+.
2150 */
2151 ret = i915_mutex_lock_interruptible(&dev_priv->drm);
2152 if (ret)
2153 goto err_lock;
2154
2155 dev_priv->perf.oa.exclusive_stream = stream; 2140 dev_priv->perf.oa.exclusive_stream = stream;
2156 2141
2157 mutex_unlock(&dev_priv->drm.struct_mutex); 2142 mutex_unlock(&dev_priv->drm.struct_mutex);
2158 2143
2159 return 0; 2144 return 0;
2160 2145
2161err_lock: 2146err_enable:
2162 dev_priv->perf.oa.ops.disable_metric_set(dev_priv); 2147 dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
2148 mutex_unlock(&dev_priv->drm.struct_mutex);
2163 2149
2164err_enable: 2150err_lock:
2165 free_oa_buffer(dev_priv); 2151 free_oa_buffer(dev_priv);
2166 2152
2167err_oa_buf_alloc: 2153err_oa_buf_alloc:
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index b33d2158c234..e5e6f6bb2b05 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
304{ 304{
305 struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); 305 struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
306 struct intel_rps *rps = &dev_priv->gt_pm.rps; 306 struct intel_rps *rps = &dev_priv->gt_pm.rps;
307 u32 val; 307 bool boost = false;
308 ssize_t ret; 308 ssize_t ret;
309 u32 val;
309 310
310 ret = kstrtou32(buf, 0, &val); 311 ret = kstrtou32(buf, 0, &val);
311 if (ret) 312 if (ret)
@@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
317 return -EINVAL; 318 return -EINVAL;
318 319
319 mutex_lock(&dev_priv->pcu_lock); 320 mutex_lock(&dev_priv->pcu_lock);
320 rps->boost_freq = val; 321 if (val != rps->boost_freq) {
322 rps->boost_freq = val;
323 boost = atomic_read(&rps->num_waiters);
324 }
321 mutex_unlock(&dev_priv->pcu_lock); 325 mutex_unlock(&dev_priv->pcu_lock);
326 if (boost)
327 schedule_work(&rps->work);
322 328
323 return count; 329 return count;
324} 330}
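
Writing boost_freq alone would leave clients already waiting for a boost stuck at the old frequency until the next boost request arrived; the fix records, under pcu_lock, whether the value changed while waiters exist, and kicks the RPS worker after dropping the lock. The decide-under-lock, act-outside idiom:

/* kernel-style fragment mirroring the gt_boost_freq_mhz_store() fix */
bool kick = false;

mutex_lock(&dev_priv->pcu_lock);
if (val != rps->boost_freq) {
	rps->boost_freq = val;
	kick = atomic_read(&rps->num_waiters);	/* anyone waiting? */
}
mutex_unlock(&dev_priv->pcu_lock);

if (kick)
	schedule_work(&rps->work);	/* re-evaluate at the new frequency */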
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f51645a08dca..6aff9d096e13 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2175,8 +2175,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 	intel_prepare_dp_ddi_buffers(encoder, crtc_state);
 
 	intel_ddi_init_dp_buf_reg(encoder);
-	if (!is_mst)
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	intel_dp_start_link_train(intel_dp);
 	if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
 		intel_dp_stop_link_train(intel_dp);
@@ -2274,14 +2273,12 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 	struct intel_dp *intel_dp = &dig_port->dp;
-	bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);
 
 	/*
 	 * Power down sink before disabling the port, otherwise we end
 	 * up getting interrupts from the sink on detecting link loss.
 	 */
-	if (!is_mst)
-		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 
 	intel_disable_ddi_buf(encoder);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 35c5299feab6..a29868cd30c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -620,19 +620,15 @@ static int
 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+	int backlight_controller = dev_priv->vbt.backlight.controller;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
 	/* We should never land here with regular DP ports */
 	WARN_ON(!intel_dp_is_edp(intel_dp));
 
-	/*
-	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
-	 * mapping needs to be retrieved from VBT, for now just hard-code to
-	 * use instance #0 always.
-	 */
 	if (!intel_dp->pps_reset)
-		return 0;
+		return backlight_controller;
 
 	intel_dp->pps_reset = false;
 
@@ -642,7 +638,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
 	 */
 	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
 
-	return 0;
+	return backlight_controller;
 }
 
 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 348a4f7ffb67..53747318f4a7 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -246,7 +246,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev_priv, 0,
+		i915_handle_error(dev_priv, BIT(engine->id),
 				  "Kicking stuck wait on %s",
 				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
@@ -258,7 +258,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 	default:
 		return ENGINE_DEAD;
 	case 1:
-		i915_handle_error(dev_priv, 0,
+		i915_handle_error(dev_priv, ALL_ENGINES,
 				  "Kicking stuck semaphore on %s",
 				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7ece2f061b9e..e0fca035ff78 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -719,6 +719,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	unsigned long flags;
 
+	GEM_TRACE("%s\n", engine->name);
+
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
@@ -765,6 +767,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	 */
 	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
+	/* Mark all CS interrupts as complete */
+	execlists->active = 0;
+
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9a9961802f5c..e83af0f2be86 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
 			   struct drm_crtc_state *old_crtc_state)
 {
 	drm_crtc_vblank_on(crtc);
+}
 
+static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
+				  struct drm_crtc_state *old_crtc_state)
+{
 	spin_lock_irq(&crtc->dev->event_lock);
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc));
@@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
 	.mode_set_nofb = ipu_crtc_mode_set_nofb,
 	.atomic_check = ipu_crtc_atomic_check,
 	.atomic_begin = ipu_crtc_atomic_begin,
+	.atomic_flush = ipu_crtc_atomic_flush,
 	.atomic_disable = ipu_crtc_atomic_disable,
 	.atomic_enable = ipu_crtc_atomic_enable,
 };
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 57ed56d8623f..d9113faaa62f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -22,6 +22,7 @@
 #include <drm/drm_plane_helper.h>
 
 #include "video/imx-ipu-v3.h"
+#include "imx-drm.h"
 #include "ipuv3-plane.h"
 
 struct ipu_plane_state {
@@ -272,7 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
 	kfree(ipu_plane);
 }
 
-void ipu_plane_state_reset(struct drm_plane *plane)
+static void ipu_plane_state_reset(struct drm_plane *plane)
 {
 	struct ipu_plane_state *ipu_state;
 
@@ -292,7 +293,8 @@ void ipu_plane_state_reset(struct drm_plane *plane)
 	plane->state = &ipu_state->base;
 }
 
-struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *
+ipu_plane_duplicate_state(struct drm_plane *plane)
 {
 	struct ipu_plane_state *state;
 
@@ -306,8 +308,8 @@ struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
 	return &state->base;
 }
 
-void ipu_plane_destroy_state(struct drm_plane *plane,
-			     struct drm_plane_state *state)
+static void ipu_plane_destroy_state(struct drm_plane *plane,
+				    struct drm_plane_state *state)
 {
 	struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 380f340204e8..debbbf0fd4bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val;
 
@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div = 1025;
 	u32 val = (bd->props.brightness * div) / 100;
 
@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd)
 	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
 	struct nvif_object *device = &drm->client.device.object;
-	int or = nv_encoder->or;
+	int or = ffs(nv_encoder->dcb->or) - 1;
 	u32 div, val;
 
 	div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector)
 		return -ENODEV;
 	}
 
-	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
 		return 0;
 
 	if (drm->client.device.info.chipset <= 0xa0 ||
@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
 	struct nvif_device *device = &drm->client.device;
 	struct drm_connector *connector;
 
+	INIT_LIST_HEAD(&drm->bl_connectors);
+
 	if (apple_gmux_present()) {
 		NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
 		return 0;
 	}
 
-	INIT_LIST_HEAD(&drm->bl_connectors);
-
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 93946dcee319..1c12e58f44c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
 
 	tail = this->addr + this->size;
 	if (vmm->func->page_block && next && next->page != p)
-		tail = ALIGN_DOWN(addr, vmm->func->page_block);
+		tail = ALIGN_DOWN(tail, vmm->func->page_block);
 
 	if (addr <= tail && tail - addr >= size) {
 		rb_erase(&this->tree, &vmm->free);
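The nouveau fix above is a one-word repair: the intent is to clamp the candidate range's end (tail) down to a page_block boundary, and rounding addr instead produced a bogus end address. ALIGN_DOWN() rounds to a power-of-two multiple; a simplified standalone illustration (the kernel's real macro is expressed differently, this mask form is equivalent only for power-of-two alignments):

/* ALIGN_DOWN(x, a) rounds x down to a multiple of a (a: power of two).
 * Reimplemented here for illustration only.
 */
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((__typeof__(x))(a) - 1))

int main(void)
{
	unsigned long tail = 0x12345;
	unsigned long page_block = 0x1000;	/* sample power-of-two block */

	printf("ALIGN_DOWN(%#lx, %#lx) = %#lx\n",
	       tail, page_block, ALIGN_DOWN(tail, page_block));
	/* prints 0x12000: the tail clamped down to a block boundary */
	return 0;
}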
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index d3045a371a55..7c73bc7e2f85 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3221,35 +3221,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	case CHIP_KAVERI:
 		rdev->config.cik.max_shader_engines = 1;
 		rdev->config.cik.max_tile_pipes = 4;
-		if ((rdev->pdev->device == 0x1304) ||
-		    (rdev->pdev->device == 0x1305) ||
-		    (rdev->pdev->device == 0x130C) ||
-		    (rdev->pdev->device == 0x130F) ||
-		    (rdev->pdev->device == 0x1310) ||
-		    (rdev->pdev->device == 0x1311) ||
-		    (rdev->pdev->device == 0x131C)) {
-			rdev->config.cik.max_cu_per_sh = 8;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1309) ||
-			   (rdev->pdev->device == 0x130A) ||
-			   (rdev->pdev->device == 0x130D) ||
-			   (rdev->pdev->device == 0x1313) ||
-			   (rdev->pdev->device == 0x131D)) {
-			rdev->config.cik.max_cu_per_sh = 6;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1306) ||
-			   (rdev->pdev->device == 0x1307) ||
-			   (rdev->pdev->device == 0x130B) ||
-			   (rdev->pdev->device == 0x130E) ||
-			   (rdev->pdev->device == 0x1315) ||
-			   (rdev->pdev->device == 0x1318) ||
-			   (rdev->pdev->device == 0x131B)) {
-			rdev->config.cik.max_cu_per_sh = 4;
-			rdev->config.cik.max_backends_per_se = 1;
-		} else {
-			rdev->config.cik.max_cu_per_sh = 3;
-			rdev->config.cik.max_backends_per_se = 1;
-		}
+		rdev->config.cik.max_cu_per_sh = 8;
+		rdev->config.cik.max_backends_per_se = 2;
 		rdev->config.cik.max_sh_per_se = 1;
 		rdev->config.cik.max_texture_channel_caches = 4;
 		rdev->config.cik.max_gprs = 256;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2e2ca3c6b47d..df9469a8fdb1 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 		/* don't do anything if sink is not display port, i.e.,
 		 * passive dp->(dvi|hdmi) adaptor
 		 */
-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
-			int saved_dpms = connector->dpms;
-			/* Only turn off the display if it's physically disconnected */
-			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-			} else if (radeon_dp_needs_link_train(radeon_connector)) {
-				/* Don't try to start link training before we
-				 * have the dpcd */
-				if (!radeon_dp_getdpcd(radeon_connector))
-					return;
-
-				/* set it to OFF so that drm_helper_connector_dpms()
-				 * won't return immediately since the current state
-				 * is ON at this point.
-				 */
-				connector->dpms = DRM_MODE_DPMS_OFF;
-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
-			}
-			connector->dpms = saved_dpms;
+		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+		    radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+		    radeon_dp_needs_link_train(radeon_connector)) {
+			/* Don't start link training before we have the DPCD */
+			if (!radeon_dp_getdpcd(radeon_connector))
+				return;
+
+			/* Turn the connector off and back on immediately, which
+			 * will trigger link training
+			 */
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a9962ffba720..27d8e7dd2d06 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
 	struct radeon_bo *robj = gem_to_radeon_bo(gobj);
 
 	if (robj) {
-		if (robj->gem_base.import_attach)
-			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 		radeon_mn_unregister(robj);
 		radeon_bo_unref(&robj);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 15404af9d740..31f5ad605e59 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	mutex_unlock(&bo->rdev->gem.mutex);
 	radeon_bo_clear_surface_reg(bo);
 	WARN_ON_ONCE(!list_empty(&bo->va));
+	if (bo->gem_base.import_attach)
+		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 	drm_gem_object_release(&bo->gem_base);
 	kfree(bo);
 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 5decae0069d0..78cbc3145e44 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -93,6 +93,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	DRM_DEBUG_DRIVER("Disabling the CRTC\n");
 
+	drm_crtc_vblank_off(crtc);
+
 	sun4i_tcon_set_status(scrtc->tcon, encoder, false);
 
 	if (crtc->state->event && !crtc->state->active) {
@@ -113,6 +115,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
 	DRM_DEBUG_DRIVER("Enabling the CRTC\n");
 
 	sun4i_tcon_set_status(scrtc->tcon, encoder, true);
+
+	drm_crtc_vblank_on(crtc);
 }
 
 static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index 023f39bda633..e36004fbe453 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -132,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)
 static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
 {
 	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	u32 val = degrees / 120;
+
+	val <<= 28;
 
 	regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
 			   GENMASK(29, 28),
-			   degrees / 120);
+			   val);
 
 	return 0;
 }
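The dotclock fix above addresses a common regmap_update_bits() pitfall: the value argument must already be positioned at the mask's bit offset, since GENMASK(29, 28) only selects which bits are written. Passing the raw degrees / 120 left the two-bit field at bits 1:0, where the mask discarded it. A small standalone sketch of the masked read-modify-write this relies on (the helper below mirrors what regmap does internally; it is an illustration, not the regmap API):

/* Standalone sketch of a masked field update. */
#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);	/* val must be pre-shifted */
}

int main(void)
{
	uint32_t reg = 0;
	int degrees = 240;
	uint32_t val = (uint32_t)(degrees / 120) << 28;	/* shift into bits 29:28 */

	reg = update_bits(reg, GENMASK(29, 28), val);
	printf("reg = %#x\n", reg);	/* 0x20000000; unshifted val would write 0 */
	return 0;
}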
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 4570da0227b4..d9a71f361b14 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -111,7 +111,7 @@ static int sun4i_drv_bind(struct device *dev)
 	/* drm_vblank_init calls kcalloc, which can fail */
 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (ret)
-		goto free_mem_region;
+		goto cleanup_mode_config;
 
 	drm->irq_enabled = true;
 
@@ -139,7 +139,6 @@ finish_poll:
 	sun4i_framebuffer_free(drm);
 cleanup_mode_config:
 	drm_mode_config_cleanup(drm);
-free_mem_region:
 	of_reserved_mem_device_release(dev);
 free_drm:
 	drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 500b6fb3e028..fa4bcd092eaf 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -538,7 +538,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
 					     &sun4i_hdmi_regmap_config);
 	if (IS_ERR(hdmi->regmap)) {
 		dev_err(dev, "Couldn't create HDMI encoder regmap\n");
-		return PTR_ERR(hdmi->regmap);
+		ret = PTR_ERR(hdmi->regmap);
+		goto err_disable_mod_clk;
 	}
 
 	ret = sun4i_tmds_create(hdmi);
@@ -551,7 +552,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
 		hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
 		if (IS_ERR(hdmi->ddc_parent_clk)) {
 			dev_err(dev, "Couldn't get the HDMI DDC clock\n");
-			return PTR_ERR(hdmi->ddc_parent_clk);
+			ret = PTR_ERR(hdmi->ddc_parent_clk);
+			goto err_disable_mod_clk;
 		}
 	} else {
 		hdmi->ddc_parent_clk = hdmi->tmds_clk;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 832f8f9bc47f..b8da5a50a61d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -92,6 +92,8 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
 
 	DRM_DEBUG_DRIVER("Vertical parameters OK\n");
 
+	tcon->dclk_min_div = 6;
+	tcon->dclk_max_div = 127;
 	rounded_rate = clk_round_rate(tcon->dclk, rate);
 	if (rounded_rate < rate)
 		return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index b3960118deb9..a818ca491605 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -101,10 +101,13 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
 		return;
 	}
 
-	if (enabled)
+	if (enabled) {
 		clk_prepare_enable(clk);
-	else
+		clk_rate_exclusive_get(clk);
+	} else {
+		clk_rate_exclusive_put(clk);
 		clk_disable_unprepare(clk);
+	}
 }
 
 static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
@@ -260,7 +263,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
 				      const struct drm_display_mode *mode)
 {
 	/* Configure the dot clock */
-	clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000);
+	clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
 
 	/* Set the resolution */
 	regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
@@ -421,7 +424,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
 	WARN_ON(!tcon->quirks->has_channel_1);
 
 	/* Configure the dot clock */
-	clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000);
+	clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
 
 	/* Adjust clock delay */
 	clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
@@ -873,52 +876,56 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
-	/*
-	 * This can only be made optional since we've had DT nodes
-	 * without the LVDS reset properties.
-	 *
-	 * If the property is missing, just disable LVDS, and print a
-	 * warning.
-	 */
-	tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
-	if (IS_ERR(tcon->lvds_rst)) {
-		dev_err(dev, "Couldn't get our reset line\n");
-		return PTR_ERR(tcon->lvds_rst);
-	} else if (tcon->lvds_rst) {
-		has_lvds_rst = true;
-		reset_control_reset(tcon->lvds_rst);
-	} else {
-		has_lvds_rst = false;
-	}
+	if (tcon->quirks->supports_lvds) {
+		/*
+		 * This can only be made optional since we've had DT
+		 * nodes without the LVDS reset properties.
+		 *
+		 * If the property is missing, just disable LVDS, and
+		 * print a warning.
+		 */
+		tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
+		if (IS_ERR(tcon->lvds_rst)) {
+			dev_err(dev, "Couldn't get our reset line\n");
+			return PTR_ERR(tcon->lvds_rst);
+		} else if (tcon->lvds_rst) {
+			has_lvds_rst = true;
+			reset_control_reset(tcon->lvds_rst);
+		} else {
+			has_lvds_rst = false;
+		}
 
-	/*
-	 * This can only be made optional since we've had DT nodes
-	 * without the LVDS reset properties.
-	 *
-	 * If the property is missing, just disable LVDS, and print a
-	 * warning.
-	 */
-	if (tcon->quirks->has_lvds_alt) {
-		tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
-		if (IS_ERR(tcon->lvds_pll)) {
-			if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
-				has_lvds_alt = false;
-			} else {
-				dev_err(dev, "Couldn't get the LVDS PLL\n");
-				return PTR_ERR(tcon->lvds_pll);
-			}
-		} else {
-			has_lvds_alt = true;
-		}
-	}
+		/*
+		 * This can only be made optional since we've had DT
+		 * nodes without the LVDS reset properties.
+		 *
+		 * If the property is missing, just disable LVDS, and
+		 * print a warning.
+		 */
+		if (tcon->quirks->has_lvds_alt) {
+			tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
+			if (IS_ERR(tcon->lvds_pll)) {
+				if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
+					has_lvds_alt = false;
+				} else {
+					dev_err(dev, "Couldn't get the LVDS PLL\n");
+					return PTR_ERR(tcon->lvds_pll);
+				}
+			} else {
+				has_lvds_alt = true;
+			}
+		}
 
-	if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
-		dev_warn(dev,
-			 "Missing LVDS properties, Please upgrade your DT\n");
-		dev_warn(dev, "LVDS output disabled\n");
-		can_lvds = false;
+		if (!has_lvds_rst ||
+		    (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
+			dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n");
+			dev_warn(dev, "LVDS output disabled\n");
+			can_lvds = false;
+		} else {
+			can_lvds = true;
+		}
 	} else {
-		can_lvds = true;
+		can_lvds = false;
 	}
 
 	ret = sun4i_tcon_init_clocks(dev, tcon);
@@ -1137,7 +1144,7 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
-	/* nothing is supported */
+	.supports_lvds		= true,
 };
 
 static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index b761c7b823c5..278700c7bf9f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -175,6 +175,7 @@ struct sun4i_tcon_quirks {
 	bool	has_channel_1;	/* a33 does not have channel 1 */
 	bool	has_lvds_alt;	/* Does the LVDS clock have a parent other than the TCON clock? */
 	bool	needs_de_be_mux; /* sun6i needs mux to select backend */
+	bool	supports_lvds;	/* Does the TCON support an LVDS output? */
 
 	/* callback to handle tcon muxing options */
 	int	(*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index b8403ed48285..fbffe1948b3b 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1903,8 +1903,12 @@ cleanup:
 	if (!IS_ERR(primary))
 		drm_plane_cleanup(primary);
 
-	if (group && tegra->domain) {
-		iommu_detach_group(tegra->domain, group);
+	if (group && dc->domain) {
+		if (group == tegra->group) {
+			iommu_detach_group(dc->domain, group);
+			tegra->group = NULL;
+		}
+
 		dc->domain = NULL;
 	}
 
@@ -1913,8 +1917,10 @@ cleanup:
 
 static int tegra_dc_exit(struct host1x_client *client)
 {
+	struct drm_device *drm = dev_get_drvdata(client->parent);
 	struct iommu_group *group = iommu_group_get(client->dev);
 	struct tegra_dc *dc = host1x_client_to_dc(client);
+	struct tegra_drm *tegra = drm->dev_private;
 	int err;
 
 	devm_free_irq(dc->dev, dc->irq, dc);
@@ -1926,7 +1932,11 @@ static int tegra_dc_exit(struct host1x_client *client)
 	}
 
 	if (group && dc->domain) {
-		iommu_detach_group(dc->domain, group);
+		if (group == tegra->group) {
+			iommu_detach_group(dc->domain, group);
+			tegra->group = NULL;
+		}
+
 		dc->domain = NULL;
 	}
 
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index d50bddb2e447..7fcf4a242840 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm)
 
 	drm_kms_helper_poll_fini(drm);
 	tegra_drm_fb_exit(drm);
+	drm_atomic_helper_shutdown(drm);
 	drm_mode_config_cleanup(drm);
 
 	err = host1x_device_exit(device);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 4d2ed966f9e3..87c5d89bc9ba 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1072,7 +1072,6 @@ static int tegra_dsi_exit(struct host1x_client *client)
 	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
 
 	tegra_output_exit(&dsi->output);
-	regulator_disable(dsi->vdd);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 36a06a993698..94dac79ac3c9 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -297,6 +297,10 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
 	case WIN_COLOR_DEPTH_B8G8R8X8:
 		*alpha = WIN_COLOR_DEPTH_B8G8R8A8;
 		return 0;
+
+	case WIN_COLOR_DEPTH_B5G6R5:
+		*alpha = opaque;
+		return 0;
 	}
 
 	return -EINVAL;
@@ -330,9 +334,6 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
 	unsigned int zpos[2];
 	unsigned int i;
 
-	for (i = 0; i < 3; i++)
-		state->dependent[i] = false;
-
 	for (i = 0; i < 2; i++)
 		zpos[i] = 0;
 
@@ -346,6 +347,8 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
 
 		index = tegra_plane_get_overlap_index(tegra, p);
 
+		state->dependent[index] = false;
+
 		/*
 		 * If any of the other planes is on top of this plane and uses
 		 * a format with an alpha component, mark this plane as being
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index b5b335c9b2bb..2ebdc6d5a76e 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	unsigned long start = vma->vm_start;
 	unsigned long size = vma->vm_end - vma->vm_start;
-	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long offset;
 	unsigned long page, pos;
 
-	if (offset + size > info->fix.smem_len)
+	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+		return -EINVAL;
+
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+
+	if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
 		return -EINVAL;
 
 	pos = (unsigned long)info->fix.smem_start + offset;
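The udl_fb_mmap() change above is the standard overflow-safe bounds check: with a user-controlled page offset, the naive "offset + size > smem_len" comparison can wrap and accept an out-of-range mapping. The fix first rejects offsets that would overflow the shift, then compares by subtraction so no addition can wrap. A minimal standalone sketch of the idiom (range_ok() is a name invented here for illustration):

/* Overflow-safe range check: is [offset, offset + size) inside a buffer
 * of length len? Naive "offset + size > len" can wrap around.
 */
#include <stdbool.h>
#include <stdio.h>

static bool range_ok(unsigned long offset, unsigned long size,
		     unsigned long len)
{
	return offset <= len && size <= len - offset;
}

int main(void)
{
	unsigned long len = 4096;

	printf("%d\n", range_ok(0, 4096, len));		/* 1: exact fit */
	printf("%d\n", range_ok(4095, 2, len));		/* 0: one past end */
	printf("%d\n", range_ok(~0UL - 1, 16, len));	/* 0: naive check would wrap and accept */
	return 0;
}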
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 184340d486c3..86d25f18aa99 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
  */
 void vmw_svga_disable(struct vmw_private *dev_priv)
 {
+	/*
+	 * Disabling SVGA will turn off device modesetting capabilities, so
+	 * notify KMS about that so that it doesn't cache atomic state that
+	 * isn't valid anymore, for example crtcs turned on.
+	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
+	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
+	 * end up with lock order reversal. Thus, a master may actually perform
+	 * a new modeset just after we call vmw_kms_lost_device() and race with
+	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
+	 * to be inconsistent with the device, causing modesetting problems.
+	 *
+	 */
+	vmw_kms_lost_device(dev_priv->dev);
 	ttm_write_lock(&dev_priv->reservation_sem, false);
 	spin_lock(&dev_priv->svga_lock);
 	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d08753e8fd94..9116fe8baebc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
+void vmw_kms_lost_device(struct drm_device *dev);
 
 int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ead61015cd79..3c824fd7cbf3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,7 +31,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_rect.h>
 
-
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
@@ -2517,9 +2516,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
  * Helper to be used if an error forces the caller to undo the actions of
  * vmw_kms_helper_resource_prepare.
  */
-void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
 {
-	vmw_kms_helper_buffer_revert(res->backup);
+	struct vmw_resource *res = ctx->res;
+
+	vmw_kms_helper_buffer_revert(ctx->buf);
+	vmw_dmabuf_unreference(&ctx->buf);
 	vmw_resource_unreserve(res, false, NULL, 0);
 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2536,10 +2538,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
  * interrupted by a signal.
  */
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible)
+				    bool interruptible,
+				    struct vmw_validation_ctx *ctx)
 {
 	int ret = 0;
 
+	ctx->buf = NULL;
+	ctx->res = res;
+
 	if (interruptible)
 		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
 	else
@@ -2558,6 +2564,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 						    res->dev_priv->has_mob);
 		if (ret)
 			goto out_unreserve;
+
+		ctx->buf = vmw_dmabuf_reference(res->backup);
 	}
 	ret = vmw_resource_validate(res);
 	if (ret)
@@ -2565,7 +2573,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 	return 0;
 
 out_revert:
-	vmw_kms_helper_buffer_revert(res->backup);
+	vmw_kms_helper_buffer_revert(ctx->buf);
 out_unreserve:
 	vmw_resource_unreserve(res, false, NULL, 0);
 out_unlock:
@@ -2581,11 +2589,13 @@ out_unlock:
  * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
  * ref-counted fence pointer is returned here.
  */
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
 				    struct vmw_fence_obj **out_fence)
 {
-	if (res->backup || out_fence)
-		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+	struct vmw_resource *res = ctx->res;
+
+	if (ctx->buf || out_fence)
+		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
 					     out_fence, NULL);
 
 	vmw_resource_unreserve(res, false, NULL, 0);
@@ -2851,3 +2861,14 @@ int vmw_kms_set_config(struct drm_mode_set *set,
 
 	return drm_atomic_helper_set_config(set, ctx);
 }
+
+
+/**
+ * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
+ *
+ * @dev: Pointer to the drm device
+ */
+void vmw_kms_lost_device(struct drm_device *dev)
+{
+	drm_atomic_helper_shutdown(dev);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index cd9da2dd79af..3d2ca280eaa7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -240,6 +240,11 @@ struct vmw_display_unit {
 	int set_gui_y;
 };
 
+struct vmw_validation_ctx {
+	struct vmw_resource *res;
+	struct vmw_dma_buffer *buf;
+};
+
 #define vmw_crtc_to_du(x) \
 	container_of(x, struct vmw_display_unit, crtc)
 #define vmw_connector_to_du(x) \
@@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 				  struct drm_vmw_fence_rep __user *
 				  user_fence_rep);
 int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible);
-void vmw_kms_helper_resource_revert(struct vmw_resource *res);
-void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+				    bool interruptible,
+				    struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
+void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
 				    struct vmw_fence_obj **out_fence);
 int vmw_kms_readback(struct vmw_private *dev_priv,
 		     struct drm_file *file_priv,
@@ -439,5 +445,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 
 int vmw_kms_set_config(struct drm_mode_set *set,
 		       struct drm_modeset_acquire_ctx *ctx);
-
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 63a4cd794b73..3ec9eae831b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_kms_sou_surface_dirty sdirty;
+	struct vmw_validation_ctx ctx;
 	int ret;
 
 	if (!srf)
 		srf = &vfbs->surface->res;
 
-	ret = vmw_kms_helper_resource_prepare(srf, true);
+	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
 	if (ret)
 		return ret;
 
@@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
-	vmw_kms_helper_resource_finish(srf, out_fence);
+	vmw_kms_helper_resource_finish(&ctx, out_fence);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b68d74888ab1..6b969e5dea2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -980,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_stdu_dirty sdirty;
+	struct vmw_validation_ctx ctx;
 	int ret;
 
 	if (!srf)
 		srf = &vfbs->surface->res;
 
-	ret = vmw_kms_helper_resource_prepare(srf, true);
+	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
 	if (ret)
 		return ret;
 
@@ -1008,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
 out_finish:
-	vmw_kms_helper_resource_finish(srf, out_fence);
+	vmw_kms_helper_resource_finish(&ctx, out_fence);
 
 	return ret;
 }
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 97b99500153d..83f9dd934a5d 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -250,10 +250,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
 {
 	int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
 	struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
-	struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+	struct ipu_prg_channel *chan;
 	u32 val;
 
-	if (!chan->enabled || prg_chan < 0)
+	if (prg_chan < 0)
+		return;
+
+	chan = &prg->chan[prg_chan];
+	if (!chan->enabled)
 		return;
 
 	pm_runtime_get_sync(prg->dev);
@@ -280,13 +284,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
 {
 	int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
 	struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
-	struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+	struct ipu_prg_channel *chan;
 	u32 val;
 	int ret;
 
 	if (prg_chan < 0)
 		return prg_chan;
 
+	chan = &prg->chan[prg_chan];
+
 	if (chan->enabled) {
 		ipu_pre_update(prg->pres[chan->used_pre], *eba);
 		return 0;
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5b4cf030c11..9183d148d644 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in,
 		dst_release(dst);
 	}
 
-	if (ndev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip(dst_in, addr);
-		/*
-		 * Put the loopback device and get the translated
-		 * device instead.
-		 */
-		dev_put(ndev);
-		ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
-	} else {
-		addr->bound_dev_if = ndev->ifindex;
+	if (ndev) {
+		if (ndev->flags & IFF_LOOPBACK)
+			ret = rdma_translate_ip(dst_in, addr);
+		else
+			addr->bound_dev_if = ndev->ifindex;
+		dev_put(ndev);
 	}
-	dev_put(ndev);
 
 	return ret;
 }
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 915bbd867b61..66f203730e80 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3069,7 +3069,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
 			continue;
 
 		/* different dest port -> unique */
-		if (!cma_any_port(cur_daddr) &&
+		if (!cma_any_port(daddr) &&
+		    !cma_any_port(cur_daddr) &&
 		    (dport != cur_dport))
 			continue;
 
@@ -3080,7 +3081,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
 			continue;
 
 		/* different dst address -> unique */
-		if (!cma_any_addr(cur_daddr) &&
+		if (!cma_any_addr(daddr) &&
+		    !cma_any_addr(cur_daddr) &&
 		    cma_addr_cmp(daddr, cur_daddr))
 			continue;
 
@@ -3378,13 +3380,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	}
 #endif
 	}
+	daddr = cma_dst_addr(id_priv);
+	daddr->sa_family = addr->sa_family;
+
 	ret = cma_get_port(id_priv);
 	if (ret)
 		goto err2;
 
-	daddr = cma_dst_addr(id_priv);
-	daddr->sa_family = addr->sa_family;
-
 	return 0;
 err2:
 	if (id_priv->cma_dev)
@@ -4173,6 +4175,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	struct cma_multicast *mc;
 	int ret;
 
+	if (!id->device)
+		return -EINVAL;
+
 	id_priv = container_of(id, struct rdma_id_private, id);
 	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
 	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index bc79ca8215d7..af5ad6a56ae4 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -17,6 +17,7 @@
 
 /* # of WCs to poll for with a single call to ib_poll_cq */
 #define IB_POLL_BATCH			16
+#define IB_POLL_BATCH_DIRECT		8
 
 /* # of WCs to iterate over before yielding */
 #define IB_POLL_BUDGET_IRQ		256
@@ -25,18 +26,18 @@
 #define IB_POLL_FLAGS \
 	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
 
-static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
+			   int batch)
 {
 	int i, n, completed = 0;
-	struct ib_wc *wcs = poll_wc ? : cq->wc;
 
 	/*
 	 * budget might be (-1) if the caller does not
 	 * want to bound this call, thus we need unsigned
 	 * minimum here.
 	 */
-	while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
+	while ((n = ib_poll_cq(cq, min_t(u32, batch,
 			budget - completed), wcs)) > 0) {
 		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = &wcs[i];
 
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
 
 		completed += n;
 
-		if (n != IB_POLL_BATCH ||
-		    (budget != -1 && completed >= budget))
+		if (n != batch || (budget != -1 && completed >= budget))
 			break;
 	}
 
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
  */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {
-	struct ib_wc wcs[IB_POLL_BATCH];
+	struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
 
-	return __ib_process_cq(cq, budget, wcs);
+	return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
 }
 EXPORT_SYMBOL(ib_process_cq_direct);
 
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
 	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
 	int completed;
 
-	completed = __ib_process_cq(cq, budget, NULL);
+	completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
 	if (completed < budget) {
 		irq_poll_complete(&cq->iop);
 		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
 	struct ib_cq *cq = container_of(work, struct ib_cq, work);
 	int completed;
 
-	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
+	completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
+				    IB_POLL_BATCH);
 	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
 	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
 		queue_work(ib_comp_wq, &cq->work);
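The IB_POLL_BATCH_DIRECT split above exists because ib_process_cq_direct() keeps its ib_wc array on the stack, and a 16-entry array of completions is apparently large enough to threaten the frame-size budget; the direct path therefore polls in batches of 8 while the IRQ and workqueue paths keep the larger batch through the preallocated cq->wc. A hedged standalone sketch of the batched-drain loop the patch parameterizes (poll() here is a stand-in for ib_poll_cq(), and the -1 "unbounded budget" case is omitted):

/* Drain a queue in fixed-size batches, stopping early on a short poll. */
#include <stdio.h>

#define QUEUE_DEPTH 37

static int remaining = QUEUE_DEPTH;

static int poll(int *buf, int want)
{
	int n = remaining < want ? remaining : want;

	remaining -= n;
	(void)buf;
	return n;
}

static int process_queue(int budget, int batch)
{
	int buf[16];	/* batch must not exceed this backing storage */
	int n, completed = 0;

	while ((n = poll(buf, batch < budget - completed ?
			      batch : budget - completed)) > 0) {
		completed += n;
		/* a short poll means the queue is drained */
		if (n != batch || completed >= budget)
			break;
	}
	return completed;
}

int main(void)
{
	printf("processed %d\n", process_queue(256, 8));	/* prints 37 */
	return 0;
}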
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e8010e73a1cf..bb065c9449be 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device,
 	ret = device->query_device(device, &device->attrs, &uhw);
 	if (ret) {
 		pr_warn("Couldn't query the device attributes\n");
-		goto cache_cleanup;
+		goto cg_cleanup;
 	}
 
 	ret = ib_device_register_sysfs(device, port_callback);
 	if (ret) {
 		pr_warn("Couldn't register device %s with driver model\n",
 			device->name);
-		goto cache_cleanup;
+		goto cg_cleanup;
 	}
 
 	device->reg_state = IB_DEV_REGISTERED;
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device,
 	mutex_unlock(&device_mutex);
 	return 0;
 
+cg_cleanup:
+	ib_device_unregister_rdmacg(device);
 cache_cleanup:
 	ib_cache_cleanup_one(device);
 	ib_cache_release_one(device);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8cf15d4a8ac4..9f029a1ca5ea 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
1291 1291
1292 resolved_dev = dev_get_by_index(dev_addr.net, 1292 resolved_dev = dev_get_by_index(dev_addr.net,
1293 dev_addr.bound_dev_if); 1293 dev_addr.bound_dev_if);
1294 if (resolved_dev->flags & IFF_LOOPBACK) { 1294 if (!resolved_dev) {
1295 dev_put(resolved_dev); 1295 dev_put(idev);
1296 resolved_dev = idev; 1296 return -ENODEV;
1297 dev_hold(resolved_dev);
1298 } 1297 }
1299 ndev = ib_get_ndev_from_path(rec); 1298 ndev = ib_get_ndev_from_path(rec);
1300 rcu_read_lock(); 1299 rcu_read_lock();
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index f015f1bf88c9..e5a1e7d81326 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
132 ctx = idr_find(&ctx_idr, id); 132 ctx = idr_find(&ctx_idr, id);
133 if (!ctx) 133 if (!ctx)
134 ctx = ERR_PTR(-ENOENT); 134 ctx = ERR_PTR(-ENOENT);
135 else if (ctx->file != file) 135 else if (ctx->file != file || !ctx->cm_id)
136 ctx = ERR_PTR(-EINVAL); 136 ctx = ERR_PTR(-EINVAL);
137 return ctx; 137 return ctx;
138} 138}
@@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
456 struct rdma_ucm_create_id cmd; 456 struct rdma_ucm_create_id cmd;
457 struct rdma_ucm_create_id_resp resp; 457 struct rdma_ucm_create_id_resp resp;
458 struct ucma_context *ctx; 458 struct ucma_context *ctx;
459 struct rdma_cm_id *cm_id;
459 enum ib_qp_type qp_type; 460 enum ib_qp_type qp_type;
460 int ret; 461 int ret;
461 462
@@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
476 return -ENOMEM; 477 return -ENOMEM;
477 478
478 ctx->uid = cmd.uid; 479 ctx->uid = cmd.uid;
479 ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, 480 cm_id = rdma_create_id(current->nsproxy->net_ns,
480 ucma_event_handler, ctx, cmd.ps, qp_type); 481 ucma_event_handler, ctx, cmd.ps, qp_type);
481 if (IS_ERR(ctx->cm_id)) { 482 if (IS_ERR(cm_id)) {
482 ret = PTR_ERR(ctx->cm_id); 483 ret = PTR_ERR(cm_id);
483 goto err1; 484 goto err1;
484 } 485 }
485 486
@@ -489,14 +490,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
489 ret = -EFAULT; 490 ret = -EFAULT;
490 goto err2; 491 goto err2;
491 } 492 }
493
494 ctx->cm_id = cm_id;
492 return 0; 495 return 0;
493 496
494err2: 497err2:
495 rdma_destroy_id(ctx->cm_id); 498 rdma_destroy_id(cm_id);
496err1: 499err1:
497 mutex_lock(&mut); 500 mutex_lock(&mut);
498 idr_remove(&ctx_idr, ctx->id); 501 idr_remove(&ctx_idr, ctx->id);
499 mutex_unlock(&mut); 502 mutex_unlock(&mut);
503 mutex_lock(&file->mut);
504 list_del(&ctx->list);
505 mutex_unlock(&file->mut);
500 kfree(ctx); 506 kfree(ctx);
501 return ret; 507 return ret;
502} 508}
@@ -664,19 +670,23 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
664 int in_len, int out_len) 670 int in_len, int out_len)
665{ 671{
666 struct rdma_ucm_resolve_ip cmd; 672 struct rdma_ucm_resolve_ip cmd;
673 struct sockaddr *src, *dst;
667 struct ucma_context *ctx; 674 struct ucma_context *ctx;
668 int ret; 675 int ret;
669 676
670 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 677 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
671 return -EFAULT; 678 return -EFAULT;
672 679
680 src = (struct sockaddr *) &cmd.src_addr;
681 dst = (struct sockaddr *) &cmd.dst_addr;
682 if (!rdma_addr_size(src) || !rdma_addr_size(dst))
683 return -EINVAL;
684
673 ctx = ucma_get_ctx(file, cmd.id); 685 ctx = ucma_get_ctx(file, cmd.id);
674 if (IS_ERR(ctx)) 686 if (IS_ERR(ctx))
675 return PTR_ERR(ctx); 687 return PTR_ERR(ctx);
676 688
677 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, 689 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
678 (struct sockaddr *) &cmd.dst_addr,
679 cmd.timeout_ms);
680 ucma_put_ctx(ctx); 690 ucma_put_ctx(ctx);
681 return ret; 691 return ret;
682} 692}
@@ -1149,6 +1159,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
1149 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1159 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1150 return -EFAULT; 1160 return -EFAULT;
1151 1161
1162 if (cmd.qp_state > IB_QPS_ERR)
1163 return -EINVAL;
1164
1152 ctx = ucma_get_ctx(file, cmd.id); 1165 ctx = ucma_get_ctx(file, cmd.id);
1153 if (IS_ERR(ctx)) 1166 if (IS_ERR(ctx))
1154 return PTR_ERR(ctx); 1167 return PTR_ERR(ctx);
@@ -1294,6 +1307,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
1294 if (IS_ERR(ctx)) 1307 if (IS_ERR(ctx))
1295 return PTR_ERR(ctx); 1308 return PTR_ERR(ctx);
1296 1309
1310 if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
1311 return -EINVAL;
1312
1297 optval = memdup_user((void __user *) (unsigned long) cmd.optval, 1313 optval = memdup_user((void __user *) (unsigned long) cmd.optval,
1298 cmd.optlen); 1314 cmd.optlen);
1299 if (IS_ERR(optval)) { 1315 if (IS_ERR(optval)) {
@@ -1343,7 +1359,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
1343 return -ENOSPC; 1359 return -ENOSPC;
1344 1360
1345 addr = (struct sockaddr *) &cmd->addr; 1361 addr = (struct sockaddr *) &cmd->addr;
1346 if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) 1362 if (cmd->addr_size != rdma_addr_size(addr))
1347 return -EINVAL; 1363 return -EINVAL;
1348 1364
1349 if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) 1365 if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
@@ -1411,6 +1427,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
1411 join_cmd.uid = cmd.uid; 1427 join_cmd.uid = cmd.uid;
1412 join_cmd.id = cmd.id; 1428 join_cmd.id = cmd.id;
1413 join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); 1429 join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
1430 if (!join_cmd.addr_size)
1431 return -EINVAL;
1432
1414 join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; 1433 join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
1415 memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); 1434 memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
1416 1435
@@ -1426,6 +1445,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
1426 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1445 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1427 return -EFAULT; 1446 return -EFAULT;
1428 1447
1448 if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
1449 return -EINVAL;
1450
1429 return ucma_process_join(file, &cmd, out_len); 1451 return ucma_process_join(file, &cmd, out_len);
1430} 1452}
1431 1453
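
The ucma.c hunks share one theme: every user-supplied sockaddr is validated with rdma_addr_size() before it is copied or passed to the CM, so a zero-sized (unknown-family) address is rejected with -EINVAL up front. A standalone sketch of that early-validation shape, where addr_size() merely stands in for rdma_addr_size():

/* Sketch of the validation idea behind the ucma fixes: reject a
 * user-supplied sockaddr whose claimed family maps to size 0 before it
 * is ever copied or handed to the resolver. All helpers are invented. */
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static size_t addr_size(const struct sockaddr *addr)
{
    switch (addr->sa_family) {
    case AF_INET:  return sizeof(struct sockaddr_in);
    case AF_INET6: return sizeof(struct sockaddr_in6);
    default:       return 0;    /* unknown family: treated as invalid */
    }
}

static int resolve(const struct sockaddr *src, const struct sockaddr *dst)
{
    /* The patch adds exactly this style of early check so a malformed
     * address can never reach rdma_resolve_addr(). */
    if (!addr_size(src) || !addr_size(dst))
        return -1;              /* -EINVAL in the kernel */
    return 0;
}

int main(void)
{
    struct sockaddr_in v4 = { .sin_family = AF_INET };
    struct sockaddr bad;

    memset(&bad, 0, sizeof(bad));       /* sa_family 0 -> size 0 */
    return resolve((struct sockaddr *)&v4, &bad) ? 0 : 1;
}
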
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 3eb7a8387116..96f76896488d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -57,8 +57,8 @@
57#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M) 57#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
58#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G) 58#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
59 59
60#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G) 60#define BNXT_RE_MAX_MR_SIZE_LOW BIT_ULL(BNXT_RE_PAGE_SHIFT_1G)
61#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39) 61#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
62#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH 62#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
63 63
64#define BNXT_RE_MAX_QPC_COUNT (64 * 1024) 64#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
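
The BIT() to BIT_ULL() switch matters on 32-bit builds, where unsigned long is 32 bits and (1UL << 39) is undefined. A compilable illustration using macros that mirror the kernel's definitions:

/* Why BIT_ULL matters: on a 32-bit build, (1UL << 39) shifts past the
 * width of unsigned long and is undefined behaviour. Forcing the shift
 * into unsigned long long keeps the constant well-defined everywhere. */
#include <stdio.h>

#define BIT(nr)     (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))

int main(void)
{
    /* Safe on any platform: the value fits in 64 bits. */
    unsigned long long max_mr_size = BIT_ULL(39);
    printf("max MR size: %llu bytes\n", max_mr_size);
    return 0;
}
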
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 643174d949a8..8301d7e5fa8c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -785,7 +785,7 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
785 return 0; 785 return 0;
786} 786}
787 787
788static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) 788unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
789 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) 789 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
790{ 790{
791 unsigned long flags; 791 unsigned long flags;
@@ -799,8 +799,8 @@ static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
799 return flags; 799 return flags;
800} 800}
801 801
802static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, 802void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
803 unsigned long flags) 803 unsigned long flags)
804 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) 804 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
805{ 805{
806 if (qp->rcq != qp->scq) 806 if (qp->rcq != qp->scq)
@@ -1606,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1606 int status; 1606 int status;
1607 union ib_gid sgid; 1607 union ib_gid sgid;
1608 struct ib_gid_attr sgid_attr; 1608 struct ib_gid_attr sgid_attr;
1609 unsigned int flags;
1609 u8 nw_type; 1610 u8 nw_type;
1610 1611
1611 qp->qplib_qp.modify_flags = 0; 1612 qp->qplib_qp.modify_flags = 0;
@@ -1634,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1634 dev_dbg(rdev_to_dev(rdev), 1635 dev_dbg(rdev_to_dev(rdev),
1635 "Move QP = %p to flush list\n", 1636 "Move QP = %p to flush list\n",
1636 qp); 1637 qp);
1638 flags = bnxt_re_lock_cqs(qp);
1637 bnxt_qplib_add_flush_qp(&qp->qplib_qp); 1639 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1640 bnxt_re_unlock_cqs(qp, flags);
1638 } 1641 }
1639 if (!qp->sumem && 1642 if (!qp->sumem &&
1640 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { 1643 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1641 dev_dbg(rdev_to_dev(rdev), 1644 dev_dbg(rdev_to_dev(rdev),
1642 "Move QP = %p out of flush list\n", 1645 "Move QP = %p out of flush list\n",
1643 qp); 1646 qp);
1647 flags = bnxt_re_lock_cqs(qp);
1644 bnxt_qplib_clean_qp(&qp->qplib_qp); 1648 bnxt_qplib_clean_qp(&qp->qplib_qp);
1649 bnxt_re_unlock_cqs(qp, flags);
1645 } 1650 }
1646 } 1651 }
1647 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { 1652 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
@@ -2227,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr,
2227 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; 2232 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2228 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; 2233 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2229 2234
2235 /* Need unconditional fence for local invalidate
2236 * opcode to work as expected.
2237 */
2238 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2239
2230 if (wr->send_flags & IB_SEND_SIGNALED) 2240 if (wr->send_flags & IB_SEND_SIGNALED)
2231 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2241 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2232 if (wr->send_flags & IB_SEND_FENCE)
2233 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2234 if (wr->send_flags & IB_SEND_SOLICITED) 2242 if (wr->send_flags & IB_SEND_SOLICITED)
2235 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; 2243 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2236 2244
@@ -2251,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr,
2251 wqe->frmr.levels = qplib_frpl->hwq.level + 1; 2259 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2252 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; 2260 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2253 2261
2254 if (wr->wr.send_flags & IB_SEND_FENCE) 2262 /* Need unconditional fence for reg_mr
2255 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 2263 * opcode to function as expected.
2264 */
2265
2266 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2267
2256 if (wr->wr.send_flags & IB_SEND_SIGNALED) 2268 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2257 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 2269 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2258 2270
@@ -3586,7 +3598,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3586 int umem_pgs, page_shift, rc; 3598 int umem_pgs, page_shift, rc;
3587 3599
3588 if (length > BNXT_RE_MAX_MR_SIZE) { 3600 if (length > BNXT_RE_MAX_MR_SIZE) {
3589 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n", 3601 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
3590 length, BNXT_RE_MAX_MR_SIZE); 3602 length, BNXT_RE_MAX_MR_SIZE);
3591 return ERR_PTR(-ENOMEM); 3603 return ERR_PTR(-ENOMEM);
3592 } 3604 }
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index b88a48d43a9d..e62b7c2c7da6 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -222,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
222 struct ib_udata *udata); 222 struct ib_udata *udata);
223int bnxt_re_dealloc_ucontext(struct ib_ucontext *context); 223int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
224int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); 224int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
225
226unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
227void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
225#endif /* __BNXT_RE_IB_VERBS_H__ */ 228#endif /* __BNXT_RE_IB_VERBS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 33a448036c2e..f6e361750466 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -730,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
730 struct bnxt_re_qp *qp) 730 struct bnxt_re_qp *qp)
731{ 731{
732 struct ib_event event; 732 struct ib_event event;
733 unsigned int flags;
734
735 if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
736 flags = bnxt_re_lock_cqs(qp);
737 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
738 bnxt_re_unlock_cqs(qp, flags);
739 }
733 740
734 memset(&event, 0, sizeof(event)); 741 memset(&event, 0, sizeof(event));
735 if (qp->qplib_qp.srq) { 742 if (qp->qplib_qp.srq) {
@@ -1416,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work)
1416 switch (re_work->event) { 1423 switch (re_work->event) {
1417 case NETDEV_REGISTER: 1424 case NETDEV_REGISTER:
1418 rc = bnxt_re_ib_reg(rdev); 1425 rc = bnxt_re_ib_reg(rdev);
1419 if (rc) 1426 if (rc) {
1420 dev_err(rdev_to_dev(rdev), 1427 dev_err(rdev_to_dev(rdev),
1421 "Failed to register with IB: %#x", rc); 1428 "Failed to register with IB: %#x", rc);
1429 bnxt_re_remove_one(rdev);
1430 bnxt_re_dev_unreg(rdev);
1431 }
1422 break; 1432 break;
1423 case NETDEV_UP: 1433 case NETDEV_UP:
1424 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, 1434 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 3ea5b9624f6b..3a78faba8d91 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
88 } 88 }
89} 89}
90 90
91void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, 91static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
92 unsigned long *flags) 92 unsigned long *flags)
93 __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock) 93 __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
94{ 94{
95 spin_lock_irqsave(&qp->scq->hwq.lock, *flags); 95 spin_lock_irqsave(&qp->scq->flush_lock, *flags);
96 if (qp->scq == qp->rcq) 96 if (qp->scq == qp->rcq)
97 __acquire(&qp->rcq->hwq.lock); 97 __acquire(&qp->rcq->flush_lock);
98 else 98 else
99 spin_lock(&qp->rcq->hwq.lock); 99 spin_lock(&qp->rcq->flush_lock);
100} 100}
101 101
102void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, 102static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
103 unsigned long *flags) 103 unsigned long *flags)
104 __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock) 104 __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
105{ 105{
106 if (qp->scq == qp->rcq) 106 if (qp->scq == qp->rcq)
107 __release(&qp->rcq->hwq.lock); 107 __release(&qp->rcq->flush_lock);
108 else 108 else
109 spin_unlock(&qp->rcq->hwq.lock); 109 spin_unlock(&qp->rcq->flush_lock);
110 spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags); 110 spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
111}
112
113static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
114 struct bnxt_qplib_cq *cq)
115{
116 struct bnxt_qplib_cq *buddy_cq = NULL;
117
118 if (qp->scq == qp->rcq)
119 buddy_cq = NULL;
120 else if (qp->scq == cq)
121 buddy_cq = qp->rcq;
122 else
123 buddy_cq = qp->scq;
124 return buddy_cq;
125}
126
127static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
128 struct bnxt_qplib_cq *cq)
129 __acquires(&buddy_cq->hwq.lock)
130{
131 struct bnxt_qplib_cq *buddy_cq = NULL;
132
133 buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
134 if (!buddy_cq)
135 __acquire(&cq->hwq.lock);
136 else
137 spin_lock(&buddy_cq->hwq.lock);
138}
139
140static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
141 struct bnxt_qplib_cq *cq)
142 __releases(&buddy_cq->hwq.lock)
143{
144 struct bnxt_qplib_cq *buddy_cq = NULL;
145
146 buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
147 if (!buddy_cq)
148 __release(&cq->hwq.lock);
149 else
150 spin_unlock(&buddy_cq->hwq.lock);
151} 111}
152 112
153void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) 113void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
154{ 114{
155 unsigned long flags; 115 unsigned long flags;
156 116
157 bnxt_qplib_acquire_cq_locks(qp, &flags); 117 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
158 __bnxt_qplib_add_flush_qp(qp); 118 __bnxt_qplib_add_flush_qp(qp);
159 bnxt_qplib_release_cq_locks(qp, &flags); 119 bnxt_qplib_release_cq_flush_locks(qp, &flags);
160} 120}
161 121
162static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) 122static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
@@ -177,7 +137,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
177{ 137{
178 unsigned long flags; 138 unsigned long flags;
179 139
180 bnxt_qplib_acquire_cq_locks(qp, &flags); 140 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
181 __clean_cq(qp->scq, (u64)(unsigned long)qp); 141 __clean_cq(qp->scq, (u64)(unsigned long)qp);
182 qp->sq.hwq.prod = 0; 142 qp->sq.hwq.prod = 0;
183 qp->sq.hwq.cons = 0; 143 qp->sq.hwq.cons = 0;
@@ -186,7 +146,7 @@ void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
186 qp->rq.hwq.cons = 0; 146 qp->rq.hwq.cons = 0;
187 147
188 __bnxt_qplib_del_flush_qp(qp); 148 __bnxt_qplib_del_flush_qp(qp);
189 bnxt_qplib_release_cq_locks(qp, &flags); 149 bnxt_qplib_release_cq_flush_locks(qp, &flags);
190} 150}
191 151
192static void bnxt_qpn_cqn_sched_task(struct work_struct *work) 152static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
@@ -283,7 +243,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
283 u32 sw_cons, raw_cons; 243 u32 sw_cons, raw_cons;
284 u16 type; 244 u16 type;
285 int budget = nq->budget; 245 int budget = nq->budget;
286 u64 q_handle; 246 uintptr_t q_handle;
287 247
288 /* Service the NQ until empty */ 248 /* Service the NQ until empty */
289 raw_cons = hwq->cons; 249 raw_cons = hwq->cons;
@@ -566,7 +526,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
566 526
567 /* Configure the request */ 527 /* Configure the request */
568 req.dpi = cpu_to_le32(srq->dpi->dpi); 528 req.dpi = cpu_to_le32(srq->dpi->dpi);
569 req.srq_handle = cpu_to_le64(srq); 529 req.srq_handle = cpu_to_le64((uintptr_t)srq);
570 530
571 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements); 531 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
572 pbl = &srq->hwq.pbl[PBL_LVL_0]; 532 pbl = &srq->hwq.pbl[PBL_LVL_0];
@@ -2107,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
2107 /* Must block new posting of SQ and RQ */ 2067 /* Must block new posting of SQ and RQ */
2108 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2068 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2109 bnxt_qplib_cancel_phantom_processing(qp); 2069 bnxt_qplib_cancel_phantom_processing(qp);
2110
2111 /* Add qp to flush list of the CQ */
2112 __bnxt_qplib_add_flush_qp(qp);
2113} 2070}
2114 2071
2115/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) 2072/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
@@ -2285,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2285 sw_sq_cons, cqe->wr_id, cqe->status); 2242 sw_sq_cons, cqe->wr_id, cqe->status);
2286 cqe++; 2243 cqe++;
2287 (*budget)--; 2244 (*budget)--;
2288 bnxt_qplib_lock_buddy_cq(qp, cq);
2289 bnxt_qplib_mark_qp_error(qp); 2245 bnxt_qplib_mark_qp_error(qp);
2290 bnxt_qplib_unlock_buddy_cq(qp, cq); 2246 /* Add qp to flush list of the CQ */
2247 bnxt_qplib_add_flush_qp(qp);
2291 } else { 2248 } else {
2292 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { 2249 if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2293 /* Before we complete, do WA 9060 */ 2250 /* Before we complete, do WA 9060 */
@@ -2403,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2403 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2360 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2404 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2361 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2405 /* Add qp to flush list of the CQ */ 2362 /* Add qp to flush list of the CQ */
2406 bnxt_qplib_lock_buddy_cq(qp, cq); 2363 bnxt_qplib_add_flush_qp(qp);
2407 __bnxt_qplib_add_flush_qp(qp);
2408 bnxt_qplib_unlock_buddy_cq(qp, cq);
2409 } 2364 }
2410 } 2365 }
2411 2366
@@ -2489,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2489 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2444 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2490 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2445 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2491 /* Add qp to flush list of the CQ */ 2446 /* Add qp to flush list of the CQ */
2492 bnxt_qplib_lock_buddy_cq(qp, cq); 2447 bnxt_qplib_add_flush_qp(qp);
2493 __bnxt_qplib_add_flush_qp(qp);
2494 bnxt_qplib_unlock_buddy_cq(qp, cq);
2495 } 2448 }
2496 } 2449 }
2497done: 2450done:
@@ -2501,11 +2454,9 @@ done:
2501bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) 2454bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2502{ 2455{
2503 struct cq_base *hw_cqe, **hw_cqe_ptr; 2456 struct cq_base *hw_cqe, **hw_cqe_ptr;
2504 unsigned long flags;
2505 u32 sw_cons, raw_cons; 2457 u32 sw_cons, raw_cons;
2506 bool rc = true; 2458 bool rc = true;
2507 2459
2508 spin_lock_irqsave(&cq->hwq.lock, flags);
2509 raw_cons = cq->hwq.cons; 2460 raw_cons = cq->hwq.cons;
2510 sw_cons = HWQ_CMP(raw_cons, &cq->hwq); 2461 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2511 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; 2462 hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
@@ -2513,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2513 2464
2514 /* Check for Valid bit. If the CQE is valid, return false */ 2465 /* Check for Valid bit. If the CQE is valid, return false */
2515 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); 2466 rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2516 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2517 return rc; 2467 return rc;
2518} 2468}
2519 2469
@@ -2602,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2602 if (hwcqe->status != CQ_RES_RC_STATUS_OK) { 2552 if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2603 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 2553 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2604 /* Add qp to flush list of the CQ */ 2554 /* Add qp to flush list of the CQ */
2605 bnxt_qplib_lock_buddy_cq(qp, cq); 2555 bnxt_qplib_add_flush_qp(qp);
2606 __bnxt_qplib_add_flush_qp(qp);
2607 bnxt_qplib_unlock_buddy_cq(qp, cq);
2608 } 2556 }
2609 } 2557 }
2610 2558
@@ -2719,9 +2667,7 @@ do_rq:
2719 */ 2667 */
2720 2668
2721 /* Add qp to flush list of the CQ */ 2669 /* Add qp to flush list of the CQ */
2722 bnxt_qplib_lock_buddy_cq(qp, cq); 2670 bnxt_qplib_add_flush_qp(qp);
2723 __bnxt_qplib_add_flush_qp(qp);
2724 bnxt_qplib_unlock_buddy_cq(qp, cq);
2725done: 2671done:
2726 return rc; 2672 return rc;
2727} 2673}
@@ -2750,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2750 u32 budget = num_cqes; 2696 u32 budget = num_cqes;
2751 unsigned long flags; 2697 unsigned long flags;
2752 2698
2753 spin_lock_irqsave(&cq->hwq.lock, flags); 2699 spin_lock_irqsave(&cq->flush_lock, flags);
2754 list_for_each_entry(qp, &cq->sqf_head, sq_flush) { 2700 list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2755 dev_dbg(&cq->hwq.pdev->dev, 2701 dev_dbg(&cq->hwq.pdev->dev,
2756 "QPLIB: FP: Flushing SQ QP= %p", 2702 "QPLIB: FP: Flushing SQ QP= %p",
@@ -2764,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2764 qp); 2710 qp);
2765 __flush_rq(&qp->rq, qp, &cqe, &budget); 2711 __flush_rq(&qp->rq, qp, &cqe, &budget);
2766 } 2712 }
2767 spin_unlock_irqrestore(&cq->hwq.lock, flags); 2713 spin_unlock_irqrestore(&cq->flush_lock, flags);
2768 2714
2769 return num_cqes - budget; 2715 return num_cqes - budget;
2770} 2716}
@@ -2773,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2773 int num_cqes, struct bnxt_qplib_qp **lib_qp) 2719 int num_cqes, struct bnxt_qplib_qp **lib_qp)
2774{ 2720{
2775 struct cq_base *hw_cqe, **hw_cqe_ptr; 2721 struct cq_base *hw_cqe, **hw_cqe_ptr;
2776 unsigned long flags;
2777 u32 sw_cons, raw_cons; 2722 u32 sw_cons, raw_cons;
2778 int budget, rc = 0; 2723 int budget, rc = 0;
2779 2724
2780 spin_lock_irqsave(&cq->hwq.lock, flags);
2781 raw_cons = cq->hwq.cons; 2725 raw_cons = cq->hwq.cons;
2782 budget = num_cqes; 2726 budget = num_cqes;
2783 2727
@@ -2853,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2853 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ); 2797 bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
2854 } 2798 }
2855exit: 2799exit:
2856 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2857 return num_cqes - budget; 2800 return num_cqes - budget;
2858} 2801}
2859 2802
2860void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) 2803void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2861{ 2804{
2862 unsigned long flags;
2863
2864 spin_lock_irqsave(&cq->hwq.lock, flags);
2865 if (arm_type) 2805 if (arm_type)
2866 bnxt_qplib_arm_cq(cq, arm_type); 2806 bnxt_qplib_arm_cq(cq, arm_type);
2867 /* Using cq->arm_state variable to track whether to issue cq handler */ 2807 /* Using cq->arm_state variable to track whether to issue cq handler */
2868 atomic_set(&cq->arm_state, 1); 2808 atomic_set(&cq->arm_state, 1);
2869 spin_unlock_irqrestore(&cq->hwq.lock, flags);
2870} 2809}
2871 2810
2872void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) 2811void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ca0a2ffa3509..ade9f13c0fd1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq {
389 struct list_head sqf_head, rqf_head; 389 struct list_head sqf_head, rqf_head;
390 atomic_t arm_state; 390 atomic_t arm_state;
391 spinlock_t compl_lock; /* synch CQ handlers */ 391 spinlock_t compl_lock; /* synch CQ handlers */
392/* Locking Notes:
393 * QP can move to error state from modify_qp, async error event or error
394 * CQE as part of poll_cq. When QP is moved to error state, it gets added
395 * to two flush lists, one each for SQ and RQ.
396 * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq
397 * flush_locks should be acquired when QP is moved to error. The control path
398 * operations (modify_qp and async error events) are synchronized with poll_cq
399 * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ.
400 * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq
401 * of the same QP while manipulating the flush list.
402 */
403 spinlock_t flush_lock; /* QP flush management */
392}; 404};
393 405
394#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) 406#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
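
The Locking Notes above describe the new dedicated flush_lock; the acquire/release pair in qplib_fp.c takes the SCQ lock first and the RCQ lock only when the two CQs differ. A userspace sketch of that shape, with pthread mutexes standing in for spinlocks, invented struct names, and the same fixed scq-then-rcq ordering the patch relies on:

/* When SQ and RQ share one CQ, only the single lock is taken -- the
 * same qp->scq == qp->rcq test the patch uses. */
#include <pthread.h>

struct cq { pthread_mutex_t flush_lock; };
struct qp { struct cq *scq, *rcq; };

static void acquire_cq_flush_locks(struct qp *qp)
{
    pthread_mutex_lock(&qp->scq->flush_lock);
    if (qp->scq != qp->rcq)
        pthread_mutex_lock(&qp->rcq->flush_lock);
}

static void release_cq_flush_locks(struct qp *qp)
{
    if (qp->scq != qp->rcq)
        pthread_mutex_unlock(&qp->rcq->flush_lock);
    pthread_mutex_unlock(&qp->scq->flush_lock);
}

int main(void)
{
    struct cq shared = { PTHREAD_MUTEX_INITIALIZER };
    struct qp qp = { .scq = &shared, .rcq = &shared };

    acquire_cq_flush_locks(&qp);        /* takes one lock, not two */
    release_cq_flush_locks(&qp);
    return 0;
}
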
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 8329ec6a7946..80027a494730 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
305 err_event->res_err_state_reason); 305 err_event->res_err_state_reason);
306 if (!qp) 306 if (!qp)
307 break; 307 break;
308 bnxt_qplib_acquire_cq_locks(qp, &flags);
309 bnxt_qplib_mark_qp_error(qp); 308 bnxt_qplib_mark_qp_error(qp);
310 bnxt_qplib_release_cq_locks(qp, &flags); 309 rcfw->aeq_handler(rcfw, qp_event, qp);
311 break; 310 break;
312 default: 311 default:
313 /* Command Response */ 312 /* Command Response */
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
460 int rc; 459 int rc;
461 460
462 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); 461 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
463 462 /* Supply (log-base-2-of-host-page-size - base-page-shift)
463 * to bono to adjust the doorbell page sizes.
464 */
465 req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
466 RCFW_DBR_BASE_PAGE_SHIFT);
464 /* 467 /*
465 * VFs need not setup the HW context area, PF 468 * VFs need not setup the HW context area, PF
466 * shall setup this area for VF. Skipping the 469 * shall setup this area for VF. Skipping the
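
The value programmed into log2_dbr_pg_size is simply the host page shift minus the fixed 4K base shift (RCFW_DBR_BASE_PAGE_SHIFT); the companion hunk in qplib_sp.c then sizes the L2 doorbell region from that same fixed 4K base rather than PAGE_SIZE. A quick standalone check of the arithmetic for common page sizes:

/* Names mirror the patch, but this is an illustrative sketch only. */
#include <stdio.h>

#define RCFW_DBR_BASE_PAGE_SHIFT 12

int main(void)
{
    int shifts[] = { 12, 14, 16 };      /* 4K, 16K and 64K host pages */

    for (int i = 0; i < 3; i++)
        printf("PAGE_SHIFT=%d -> log2_dbr_pg_size=%d\n",
               shifts[i], shifts[i] - RCFW_DBR_BASE_PAGE_SHIFT);
    return 0;                           /* prints 0, 2 and 4 */
}
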
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 6bee6e3636ea..c7cce2e4185e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -49,6 +49,7 @@
49#define RCFW_COMM_SIZE 0x104 49#define RCFW_COMM_SIZE 0x104
50 50
51#define RCFW_DBR_PCI_BAR_REGION 2 51#define RCFW_DBR_PCI_BAR_REGION 2
52#define RCFW_DBR_BASE_PAGE_SHIFT 12
52 53
53#define RCFW_CMD_PREP(req, CMD, cmd_flags) \ 54#define RCFW_CMD_PREP(req, CMD, cmd_flags) \
54 do { \ 55 do { \
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 03057983341f..ee98e5efef84 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -139,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
139 attr->max_pkey = le32_to_cpu(sb->max_pkeys); 139 attr->max_pkey = le32_to_cpu(sb->max_pkeys);
140 140
141 attr->max_inline_data = le32_to_cpu(sb->max_inline_data); 141 attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
142 attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; 142 attr->l2_db_size = (sb->l2_db_space_size + 1) *
143 (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
143 attr->max_sgid = le32_to_cpu(sb->max_gid); 144 attr->max_sgid = le32_to_cpu(sb->max_gid);
144 145
145 bnxt_qplib_query_version(rcfw, attr->fw_ver); 146 bnxt_qplib_query_version(rcfw, attr->fw_ver);
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 2d7ea096a247..3e5a4f760d0e 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw {
1761 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) 1761 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4)
1762 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) 1762 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4)
1763 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) 1763 #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4)
1764 __le16 reserved16; 1764 /* This value is (log-base-2-of-DBR-page-size - 12).
1765 * 0 for 4KB. HW supported values are enumerated below.
1766 */
1767 __le16 log2_dbr_pg_size;
1768 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL
1769 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
1770 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL
1771 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL
1772 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL
1773 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL
1774 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL
1775 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL
1776 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL
1777 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL
1778 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL
1779 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL
1780 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL
1781 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL
1782 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL
1783 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL
1784 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL
1785 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL
1786 #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \
1787 CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M
1765 __le64 qpc_page_dir; 1788 __le64 qpc_page_dir;
1766 __le64 mrw_page_dir; 1789 __le64 mrw_page_dir;
1767 __le64 srq_page_dir; 1790 __le64 srq_page_dir;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 9a566ee3ceff..82adc0d1d30e 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
601 wc->dlid_path_bits = 0; 601 wc->dlid_path_bits = 0;
602 602
603 if (is_eth) { 603 if (is_eth) {
604 wc->slid = 0;
604 wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); 605 wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
605 memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); 606 memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
606 memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); 607 memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
@@ -851,7 +852,6 @@ repoll:
851 } 852 }
852 } 853 }
853 854
854 wc->slid = be16_to_cpu(cqe->rlid);
855 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); 855 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
856 wc->src_qp = g_mlpath_rqpn & 0xffffff; 856 wc->src_qp = g_mlpath_rqpn & 0xffffff;
857 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; 857 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -860,6 +860,7 @@ repoll:
860 wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, 860 wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
861 cqe->checksum) ? IB_WC_IP_CSUM_OK : 0; 861 cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
862 if (is_eth) { 862 if (is_eth) {
863 wc->slid = 0;
863 wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; 864 wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
864 if (be32_to_cpu(cqe->vlan_my_qpn) & 865 if (be32_to_cpu(cqe->vlan_my_qpn) &
865 MLX4_CQE_CVLAN_PRESENT_MASK) { 866 MLX4_CQE_CVLAN_PRESENT_MASK) {
@@ -871,6 +872,7 @@ repoll:
871 memcpy(wc->smac, cqe->smac, ETH_ALEN); 872 memcpy(wc->smac, cqe->smac, ETH_ALEN);
872 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); 873 wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
873 } else { 874 } else {
875 wc->slid = be16_to_cpu(cqe->rlid);
874 wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; 876 wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
875 wc->vlan_id = 0xffff; 877 wc->vlan_id = 0xffff;
876 } 878 }
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8d2ee9322f2e..5a0e4fc4785a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
219 gid_tbl[i].version = 2; 219 gid_tbl[i].version = 2;
220 if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) 220 if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
221 gid_tbl[i].type = 1; 221 gid_tbl[i].type = 1;
222 else
223 memset(&gid_tbl[i].gid, 0, 12);
224 } 222 }
225 } 223 }
226 224
@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device,
366 if (!gids) { 364 if (!gids) {
367 ret = -ENOMEM; 365 ret = -ENOMEM;
368 } else { 366 } else {
369 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) 367 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
370 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); 368 memcpy(&gids[i].gid,
369 &port_gid_table->gids[i].gid,
370 sizeof(union ib_gid));
371 gids[i].gid_type =
372 port_gid_table->gids[i].gid_type;
373 }
371 } 374 }
372 } 375 }
373 spin_unlock_bh(&iboe->lock); 376 spin_unlock_bh(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index c4c7b82f4ac1..94a27d89a303 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -221,7 +221,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
221 wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); 221 wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
222 break; 222 break;
223 } 223 }
224 wc->slid = be16_to_cpu(cqe->slid);
225 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; 224 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
226 wc->dlid_path_bits = cqe->ml_path; 225 wc->dlid_path_bits = cqe->ml_path;
227 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; 226 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -236,10 +235,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
236 } 235 }
237 236
238 if (ll != IB_LINK_LAYER_ETHERNET) { 237 if (ll != IB_LINK_LAYER_ETHERNET) {
238 wc->slid = be16_to_cpu(cqe->slid);
239 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; 239 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
240 return; 240 return;
241 } 241 }
242 242
243 wc->slid = 0;
243 vlan_present = cqe->l4_l3_hdr_type & 0x1; 244 vlan_present = cqe->l4_l3_hdr_type & 0x1;
244 roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; 245 roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
245 if (vlan_present) { 246 if (vlan_present) {
@@ -1188,7 +1189,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1188 if (ucmd.reserved0 || ucmd.reserved1) 1189 if (ucmd.reserved0 || ucmd.reserved1)
1189 return -EINVAL; 1190 return -EINVAL;
1190 1191
1191 umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, 1192 /* check multiplication overflow */
1193 if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
1194 return -EINVAL;
1195
1196 umem = ib_umem_get(context, ucmd.buf_addr,
1197 (size_t)ucmd.cqe_size * entries,
1192 IB_ACCESS_LOCAL_WRITE, 1); 1198 IB_ACCESS_LOCAL_WRITE, 1);
1193 if (IS_ERR(umem)) { 1199 if (IS_ERR(umem)) {
1194 err = PTR_ERR(umem); 1200 err = PTR_ERR(umem);
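
The resize_user() hunk guards the entries * ucmd.cqe_size multiplication before handing the product to ib_umem_get(); the SIZE_MAX / cqe_size <= entries - 1 test rejects exactly the wrapping cases (it presumes entries >= 1, which the caller guarantees). A standalone sketch of the same guard:

/* Overflow-checked multiplication in the style of the patch. */
#include <stdint.h>
#include <stdio.h>

static int checked_mul(size_t entries, size_t cqe_size, size_t *out)
{
    if (cqe_size && SIZE_MAX / cqe_size <= entries - 1)
        return -1;                      /* would wrap: -EINVAL */
    *out = entries * cqe_size;
    return 0;
}

int main(void)
{
    size_t len;

    if (checked_mul(4096, 64, &len) == 0)
        printf("ok: %zu bytes\n", len);
    if (checked_mul(SIZE_MAX / 2, 64, &len) != 0)
        puts("rejected: would overflow");
    return 0;
}
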
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 61cc3d7db257..0e04fdddf670 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -30,12 +30,15 @@ static const struct mlx5_ib_profile rep_profile = {
30 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 30 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
31 mlx5_ib_stage_bfrag_init, 31 mlx5_ib_stage_bfrag_init,
32 mlx5_ib_stage_bfrag_cleanup), 32 mlx5_ib_stage_bfrag_cleanup),
33 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
34 NULL,
35 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
33 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 36 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
34 mlx5_ib_stage_ib_reg_init, 37 mlx5_ib_stage_ib_reg_init,
35 mlx5_ib_stage_ib_reg_cleanup), 38 mlx5_ib_stage_ib_reg_cleanup),
36 STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES, 39 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
37 mlx5_ib_stage_umr_res_init, 40 mlx5_ib_stage_post_ib_reg_umr_init,
38 mlx5_ib_stage_umr_res_cleanup), 41 NULL),
39 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR, 42 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
40 mlx5_ib_stage_class_attr_init, 43 mlx5_ib_stage_class_attr_init,
41 NULL), 44 NULL),
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d9474b95d8e5..390e4375647e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -256,12 +256,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
256 struct mlx5_ib_multiport_info *mpi; 256 struct mlx5_ib_multiport_info *mpi;
257 struct mlx5_ib_port *port; 257 struct mlx5_ib_port *port;
258 258
259 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
260 ll != IB_LINK_LAYER_ETHERNET) {
261 if (native_port_num)
262 *native_port_num = ib_port_num;
263 return ibdev->mdev;
264 }
265
259 if (native_port_num) 266 if (native_port_num)
260 *native_port_num = 1; 267 *native_port_num = 1;
261 268
262 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
263 return ibdev->mdev;
264
265 port = &ibdev->port[ib_port_num - 1]; 269 port = &ibdev->port[ib_port_num - 1];
266 if (!port) 270 if (!port)
267 return NULL; 271 return NULL;
@@ -3297,7 +3301,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3297 struct mlx5_ib_dev *ibdev; 3301 struct mlx5_ib_dev *ibdev;
3298 struct ib_event ibev; 3302 struct ib_event ibev;
3299 bool fatal = false; 3303 bool fatal = false;
3300 u8 port = 0; 3304 u8 port = (u8)work->param;
3301 3305
3302 if (mlx5_core_is_mp_slave(work->dev)) { 3306 if (mlx5_core_is_mp_slave(work->dev)) {
3303 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); 3307 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3317,8 +3321,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3317 case MLX5_DEV_EVENT_PORT_UP: 3321 case MLX5_DEV_EVENT_PORT_UP:
3318 case MLX5_DEV_EVENT_PORT_DOWN: 3322 case MLX5_DEV_EVENT_PORT_DOWN:
3319 case MLX5_DEV_EVENT_PORT_INITIALIZED: 3323 case MLX5_DEV_EVENT_PORT_INITIALIZED:
3320 port = (u8)work->param;
3321
3322 /* In RoCE, port up/down events are handled in 3324 /* In RoCE, port up/down events are handled in
3323 * mlx5_netdev_event(). 3325 * mlx5_netdev_event().
3324 */ 3326 */
@@ -3332,24 +3334,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3332 3334
3333 case MLX5_DEV_EVENT_LID_CHANGE: 3335 case MLX5_DEV_EVENT_LID_CHANGE:
3334 ibev.event = IB_EVENT_LID_CHANGE; 3336 ibev.event = IB_EVENT_LID_CHANGE;
3335 port = (u8)work->param;
3336 break; 3337 break;
3337 3338
3338 case MLX5_DEV_EVENT_PKEY_CHANGE: 3339 case MLX5_DEV_EVENT_PKEY_CHANGE:
3339 ibev.event = IB_EVENT_PKEY_CHANGE; 3340 ibev.event = IB_EVENT_PKEY_CHANGE;
3340 port = (u8)work->param;
3341
3342 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 3341 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
3343 break; 3342 break;
3344 3343
3345 case MLX5_DEV_EVENT_GUID_CHANGE: 3344 case MLX5_DEV_EVENT_GUID_CHANGE:
3346 ibev.event = IB_EVENT_GID_CHANGE; 3345 ibev.event = IB_EVENT_GID_CHANGE;
3347 port = (u8)work->param;
3348 break; 3346 break;
3349 3347
3350 case MLX5_DEV_EVENT_CLIENT_REREG: 3348 case MLX5_DEV_EVENT_CLIENT_REREG:
3351 ibev.event = IB_EVENT_CLIENT_REREGISTER; 3349 ibev.event = IB_EVENT_CLIENT_REREGISTER;
3352 port = (u8)work->param;
3353 break; 3350 break;
3354 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: 3351 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
3355 schedule_work(&ibdev->delay_drop.delay_drop_work); 3352 schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3361,7 +3358,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3361 ibev.device = &ibdev->ib_dev; 3358 ibev.device = &ibdev->ib_dev;
3362 ibev.element.port_num = port; 3359 ibev.element.port_num = port;
3363 3360
3364 if (port < 1 || port > ibdev->num_ports) { 3361 if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
3365 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); 3362 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
3366 goto out; 3363 goto out;
3367 } 3364 }
@@ -4999,19 +4996,19 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
4999 return ib_register_device(&dev->ib_dev, NULL); 4996 return ib_register_device(&dev->ib_dev, NULL);
5000} 4997}
5001 4998
5002void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) 4999void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
5003{ 5000{
5004 ib_unregister_device(&dev->ib_dev); 5001 destroy_umrc_res(dev);
5005} 5002}
5006 5003
5007int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev) 5004void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
5008{ 5005{
5009 return create_umr_res(dev); 5006 ib_unregister_device(&dev->ib_dev);
5010} 5007}
5011 5008
5012void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev) 5009int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
5013{ 5010{
5014 destroy_umrc_res(dev); 5011 return create_umr_res(dev);
5015} 5012}
5016 5013
5017static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) 5014static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@ -5130,12 +5127,15 @@ static const struct mlx5_ib_profile pf_profile = {
5130 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 5127 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5131 mlx5_ib_stage_bfrag_init, 5128 mlx5_ib_stage_bfrag_init,
5132 mlx5_ib_stage_bfrag_cleanup), 5129 mlx5_ib_stage_bfrag_cleanup),
5130 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5131 NULL,
5132 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5133 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 5133 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5134 mlx5_ib_stage_ib_reg_init, 5134 mlx5_ib_stage_ib_reg_init,
5135 mlx5_ib_stage_ib_reg_cleanup), 5135 mlx5_ib_stage_ib_reg_cleanup),
5136 STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES, 5136 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5137 mlx5_ib_stage_umr_res_init, 5137 mlx5_ib_stage_post_ib_reg_umr_init,
5138 mlx5_ib_stage_umr_res_cleanup), 5138 NULL),
5139 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, 5139 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
5140 mlx5_ib_stage_delay_drop_init, 5140 mlx5_ib_stage_delay_drop_init,
5141 mlx5_ib_stage_delay_drop_cleanup), 5141 mlx5_ib_stage_delay_drop_cleanup),
@@ -5172,12 +5172,15 @@ static const struct mlx5_ib_profile nic_rep_profile = {
5172 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 5172 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5173 mlx5_ib_stage_bfrag_init, 5173 mlx5_ib_stage_bfrag_init,
5174 mlx5_ib_stage_bfrag_cleanup), 5174 mlx5_ib_stage_bfrag_cleanup),
5175 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5176 NULL,
5177 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5175 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 5178 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5176 mlx5_ib_stage_ib_reg_init, 5179 mlx5_ib_stage_ib_reg_init,
5177 mlx5_ib_stage_ib_reg_cleanup), 5180 mlx5_ib_stage_ib_reg_cleanup),
5178 STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES, 5181 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5179 mlx5_ib_stage_umr_res_init, 5182 mlx5_ib_stage_post_ib_reg_umr_init,
5180 mlx5_ib_stage_umr_res_cleanup), 5183 NULL),
5181 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR, 5184 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
5182 mlx5_ib_stage_class_attr_init, 5185 mlx5_ib_stage_class_attr_init,
5183 NULL), 5186 NULL),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e0bad28e0f09..c33bf1523d67 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -742,8 +742,9 @@ enum mlx5_ib_stages {
742 MLX5_IB_STAGE_CONG_DEBUGFS, 742 MLX5_IB_STAGE_CONG_DEBUGFS,
743 MLX5_IB_STAGE_UAR, 743 MLX5_IB_STAGE_UAR,
744 MLX5_IB_STAGE_BFREG, 744 MLX5_IB_STAGE_BFREG,
745 MLX5_IB_STAGE_PRE_IB_REG_UMR,
745 MLX5_IB_STAGE_IB_REG, 746 MLX5_IB_STAGE_IB_REG,
746 MLX5_IB_STAGE_UMR_RESOURCES, 747 MLX5_IB_STAGE_POST_IB_REG_UMR,
747 MLX5_IB_STAGE_DELAY_DROP, 748 MLX5_IB_STAGE_DELAY_DROP,
748 MLX5_IB_STAGE_CLASS_ATTR, 749 MLX5_IB_STAGE_CLASS_ATTR,
749 MLX5_IB_STAGE_REP_REG, 750 MLX5_IB_STAGE_REP_REG,
@@ -1068,10 +1069,10 @@ int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
1068void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev); 1069void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
1069int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev); 1070int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
1070void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev); 1071void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
1072void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
1071int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev); 1073int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
1072void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev); 1074void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
1073int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev); 1075int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
1074void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev);
1075int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev); 1076int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
1076void __mlx5_ib_remove(struct mlx5_ib_dev *dev, 1077void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
1077 const struct mlx5_ib_profile *profile, 1078 const struct mlx5_ib_profile *profile,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a5fad3e87ff7..95a36e9ea552 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -839,7 +839,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
839 *umem = ib_umem_get(pd->uobject->context, start, length, 839 *umem = ib_umem_get(pd->uobject->context, start, length,
840 access_flags, 0); 840 access_flags, 0);
841 err = PTR_ERR_OR_ZERO(*umem); 841 err = PTR_ERR_OR_ZERO(*umem);
842 if (err < 0) { 842 if (err) {
843 *umem = NULL;
843 mlx5_ib_err(dev, "umem get failed (%d)\n", err); 844 mlx5_ib_err(dev, "umem get failed (%d)\n", err);
844 return err; 845 return err;
845 } 846 }
@@ -1416,6 +1417,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1416 if (err) { 1417 if (err) {
1417 mlx5_ib_warn(dev, "Failed to rereg UMR\n"); 1418 mlx5_ib_warn(dev, "Failed to rereg UMR\n");
1418 ib_umem_release(mr->umem); 1419 ib_umem_release(mr->umem);
1420 mr->umem = NULL;
1419 clean_mr(dev, mr); 1421 clean_mr(dev, mr);
1420 return err; 1422 return err;
1421 } 1423 }
@@ -1499,14 +1501,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1499 u32 key = mr->mmkey.key; 1501 u32 key = mr->mmkey.key;
1500 1502
1501 err = destroy_mkey(dev, mr); 1503 err = destroy_mkey(dev, mr);
1502 kfree(mr);
1503 if (err) { 1504 if (err) {
1504 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", 1505 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
1505 key, err); 1506 key, err);
1506 return err; 1507 return err;
1507 } 1508 }
1508 } else {
1509 mlx5_mr_cache_free(dev, mr);
1510 } 1509 }
1511 1510
1512 return 0; 1511 return 0;
@@ -1549,6 +1548,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1549 atomic_sub(npages, &dev->mdev->priv.reg_pages); 1548 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1550 } 1549 }
1551 1550
1551 if (!mr->allocated_from_cache)
1552 kfree(mr);
1553 else
1554 mlx5_mr_cache_free(dev, mr);
1555
1552 return 0; 1556 return 0;
1553} 1557}
1554 1558
@@ -1817,7 +1821,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1817 1821
1818 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; 1822 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
1819 mr->ibmr.length = 0; 1823 mr->ibmr.length = 0;
1820 mr->ndescs = sg_nents;
1821 1824
1822 for_each_sg(sgl, sg, sg_nents, i) { 1825 for_each_sg(sgl, sg, sg_nents, i) {
1823 if (unlikely(i >= mr->max_descs)) 1826 if (unlikely(i >= mr->max_descs))
@@ -1829,6 +1832,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1829 1832
1830 sg_offset = 0; 1833 sg_offset = 0;
1831 } 1834 }
1835 mr->ndescs = i;
1832 1836
1833 if (sg_offset_p) 1837 if (sg_offset_p)
1834 *sg_offset_p = sg_offset; 1838 *sg_offset_p = sg_offset;
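
The mlx5_ib_sg_to_klms() fix sets mr->ndescs to the loop counter after the walk, i.e. to the number of descriptors actually written, instead of pre-setting it to sg_nents, which overcounts whenever the loop stops early at max_descs. The idea in isolation, with invented types and constants:

/* Report what was mapped, not what was offered. */
#include <stdio.h>

#define MAX_DESCS 4

static int map_sg(const int *ents, int nents, int *descs)
{
    int i;

    for (i = 0; i < nents; i++) {
        if (i >= MAX_DESCS)             /* table full: stop early */
            break;
        descs[i] = ents[i];
    }
    return i;   /* was effectively 'nents' before the fix -- an overcount */
}

int main(void)
{
    int ents[6] = { 1, 2, 3, 4, 5, 6 }, descs[MAX_DESCS];

    printf("mapped %d of 6\n", map_sg(ents, 6, descs));
    return 0;
}
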
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0e67e3682bca..85c612ac547a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1177,7 +1177,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
1177 ib_umem_release(sq->ubuffer.umem); 1177 ib_umem_release(sq->ubuffer.umem);
1178} 1178}
1179 1179
1180static int get_rq_pas_size(void *qpc) 1180static size_t get_rq_pas_size(void *qpc)
1181{ 1181{
1182 u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12; 1182 u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
1183 u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride); 1183 u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
@@ -1193,7 +1193,8 @@ static int get_rq_pas_size(void *qpc)
1193} 1193}
1194 1194
1195static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, 1195static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1196 struct mlx5_ib_rq *rq, void *qpin) 1196 struct mlx5_ib_rq *rq, void *qpin,
1197 size_t qpinlen)
1197{ 1198{
1198 struct mlx5_ib_qp *mqp = rq->base.container_mibqp; 1199 struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
1199 __be64 *pas; 1200 __be64 *pas;
@@ -1202,9 +1203,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1202 void *rqc; 1203 void *rqc;
1203 void *wq; 1204 void *wq;
1204 void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); 1205 void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
1205 int inlen; 1206 size_t rq_pas_size = get_rq_pas_size(qpc);
1207 size_t inlen;
1206 int err; 1208 int err;
1207 u32 rq_pas_size = get_rq_pas_size(qpc); 1209
1210 if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
1211 return -EINVAL;
1208 1212
1209 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; 1213 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
1210 in = kvzalloc(inlen, GFP_KERNEL); 1214 in = kvzalloc(inlen, GFP_KERNEL);
@@ -1297,7 +1301,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
1297} 1301}
1298 1302
1299static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1303static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1300 u32 *in, 1304 u32 *in, size_t inlen,
1301 struct ib_pd *pd) 1305 struct ib_pd *pd)
1302{ 1306{
1303 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 1307 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1329,7 +1333,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1329 rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING; 1333 rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
1330 if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING) 1334 if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
1331 rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING; 1335 rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
1332 err = create_raw_packet_qp_rq(dev, rq, in); 1336 err = create_raw_packet_qp_rq(dev, rq, in, inlen);
1333 if (err) 1337 if (err)
1334 goto err_destroy_sq; 1338 goto err_destroy_sq;
1335 1339
@@ -1608,6 +1612,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1608 u32 uidx = MLX5_IB_DEFAULT_UIDX; 1612 u32 uidx = MLX5_IB_DEFAULT_UIDX;
1609 struct mlx5_ib_create_qp ucmd; 1613 struct mlx5_ib_create_qp ucmd;
1610 struct mlx5_ib_qp_base *base; 1614 struct mlx5_ib_qp_base *base;
1615 int mlx5_st;
1611 void *qpc; 1616 void *qpc;
1612 u32 *in; 1617 u32 *in;
1613 int err; 1618 int err;
@@ -1616,6 +1621,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1616 spin_lock_init(&qp->sq.lock); 1621 spin_lock_init(&qp->sq.lock);
1617 spin_lock_init(&qp->rq.lock); 1622 spin_lock_init(&qp->rq.lock);
1618 1623
1624 mlx5_st = to_mlx5_st(init_attr->qp_type);
1625 if (mlx5_st < 0)
1626 return -EINVAL;
1627
1619 if (init_attr->rwq_ind_tbl) { 1628 if (init_attr->rwq_ind_tbl) {
1620 if (!udata) 1629 if (!udata)
1621 return -ENOSYS; 1630 return -ENOSYS;
@@ -1777,7 +1786,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1777 1786
1778 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 1787 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
1779 1788
1780 MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); 1789 MLX5_SET(qpc, qpc, st, mlx5_st);
1781 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 1790 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
1782 1791
1783 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) 1792 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -1891,11 +1900,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1891 } 1900 }
1892 } 1901 }
1893 1902
1903 if (inlen < 0) {
1904 err = -EINVAL;
1905 goto err;
1906 }
1907
1894 if (init_attr->qp_type == IB_QPT_RAW_PACKET || 1908 if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
1895 qp->flags & MLX5_IB_QP_UNDERLAY) { 1909 qp->flags & MLX5_IB_QP_UNDERLAY) {
1896 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; 1910 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
1897 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); 1911 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
1898 err = create_raw_packet_qp(dev, qp, in, pd); 1912 err = create_raw_packet_qp(dev, qp, in, inlen, pd);
1899 } else { 1913 } else {
1900 err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen); 1914 err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
1901 } 1915 }
@@ -3116,8 +3130,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
3116 goto out; 3130 goto out;
3117 3131
3118 if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || 3132 if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
3119 !optab[mlx5_cur][mlx5_new]) 3133 !optab[mlx5_cur][mlx5_new]) {
3134 err = -EINVAL;
3120 goto out; 3135 goto out;
3136 }
3121 3137
3122 op = optab[mlx5_cur][mlx5_new]; 3138 op = optab[mlx5_cur][mlx5_new];
3123 optpar = ib_mask_to_mlx5_opt(attr_mask); 3139 optpar = ib_mask_to_mlx5_opt(attr_mask);
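The qp.c hunks above share one pattern: a caller-supplied value (the command length qpinlen, the QP type mapping, the state-transition table entry) is validated and rejected with -EINVAL before it is used, rather than after. A minimal sketch of the length check, with hypothetical names (parse_cmd, cmd_fixed_size):

#include <linux/errno.h>
#include <linux/kernel.h>

/* Hypothetical: reject a user-provided length before trusting it. */
static int parse_cmd(size_t buflen, size_t cmd_fixed_size,
                     size_t entry_size, u32 nentries)
{
        size_t need;

        /* entry_size * nentries must not wrap around */
        if (entry_size && nentries > (SIZE_MAX - cmd_fixed_size) / entry_size)
                return -EINVAL;

        need = cmd_fixed_size + entry_size * nentries;
        if (buflen < need)
                return -EINVAL; /* caller passed too little data */

        return 0;               /* safe to read the variable-length tail */
}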
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 6d5fadad9090..3c7522d025f2 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
241{ 241{
242 struct mlx5_ib_dev *dev = to_mdev(pd->device); 242 struct mlx5_ib_dev *dev = to_mdev(pd->device);
243 struct mlx5_ib_srq *srq; 243 struct mlx5_ib_srq *srq;
244 int desc_size; 244 size_t desc_size;
245 int buf_size; 245 size_t buf_size;
246 int err; 246 int err;
247 struct mlx5_srq_attr in = {0}; 247 struct mlx5_srq_attr in = {0};
248 __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); 248 __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
@@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
266 266
267 desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + 267 desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
268 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); 268 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
269 if (desc_size == 0 || srq->msrq.max_gs > desc_size)
270 return ERR_PTR(-EINVAL);
269 desc_size = roundup_pow_of_two(desc_size); 271 desc_size = roundup_pow_of_two(desc_size);
270 desc_size = max_t(int, 32, desc_size); 272 desc_size = max_t(size_t, 32, desc_size);
273 if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
274 return ERR_PTR(-EINVAL);
271 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / 275 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
272 sizeof(struct mlx5_wqe_data_seg); 276 sizeof(struct mlx5_wqe_data_seg);
273 srq->msrq.wqe_shift = ilog2(desc_size); 277 srq->msrq.wqe_shift = ilog2(desc_size);
274 buf_size = srq->msrq.max * desc_size; 278 buf_size = srq->msrq.max * desc_size;
275 mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", 279 if (buf_size < desc_size)
276 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, 280 return ERR_PTR(-EINVAL);
277 srq->msrq.max_avail_gather);
278 in.type = init_attr->srq_type; 281 in.type = init_attr->srq_type;
279 282
280 if (pd->uobject) 283 if (pd->uobject)
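The srq.c hunk widens desc_size and buf_size to size_t and turns silently wrapped arithmetic into -EINVAL, dropping the debug print in the process. The same overflow-guarded sizing, sketched under hypothetical names (ring_bytes and its parameters):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>

/* Hypothetical: size a ring without silent size_t wraparound. */
static int ring_bytes(size_t hdr, size_t seg, size_t nsegs,
                      size_t nwqes, size_t *total)
{
        size_t desc;

        if (nsegs && seg > (SIZE_MAX - hdr) / nsegs)
                return -EINVAL;         /* hdr + seg * nsegs wraps */
        desc = hdr + seg * nsegs;
        if (!desc)
                return -EINVAL;         /* roundup_pow_of_two(0) is undefined */
        desc = max_t(size_t, 32, roundup_pow_of_two(desc));
        if (nwqes && desc > SIZE_MAX / nwqes)
                return -EINVAL;         /* desc * nwqes wraps */
        *total = desc * nwqes;
        return 0;
}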
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 478b7317b80a..26dc374787f7 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev,
458 } 458 }
459 return -EINVAL; 459 return -EINVAL;
460 } 460 }
461 neigh = dst_neigh_lookup(dst, &dst_in); 461 neigh = dst_neigh_lookup(dst, &fl6.daddr);
462
463 if (neigh) { 462 if (neigh) {
464 rcu_read_lock(); 463 rcu_read_lock();
465 if (neigh->nud_state & NUD_VALID) { 464 if (neigh->nud_state & NUD_VALID) {
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
494 493
495 qp = idr_find(&dev->qpidr, conn_param->qpn); 494 qp = idr_find(&dev->qpidr, conn_param->qpn);
496 495
497 laddr = (struct sockaddr_in *)&cm_id->local_addr; 496 laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
498 raddr = (struct sockaddr_in *)&cm_id->remote_addr; 497 raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
499 laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; 498 laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
500 raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr; 499 raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
500
501 DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
502 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
503 ntohs(raddr->sin_port));
501 504
502 DP_DEBUG(dev, QEDR_MSG_IWARP, 505 DP_DEBUG(dev, QEDR_MSG_IWARP,
503 "Connect source address: %pISpc, remote address: %pISpc\n", 506 "Connect source address: %pISpc, remote address: %pISpc\n",
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
599 int rc; 602 int rc;
600 int i; 603 int i;
601 604
602 laddr = (struct sockaddr_in *)&cm_id->local_addr; 605 laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
603 laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; 606 laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
604 607
605 DP_DEBUG(dev, QEDR_MSG_IWARP, 608 DP_DEBUG(dev, QEDR_MSG_IWARP,
606 "Create Listener address: %pISpc\n", &cm_id->local_addr); 609 "Create Listener address: %pISpc\n", &cm_id->local_addr);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 53f00dbf313f..875b17272d65 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3034 3034
3035 switch (wr->opcode) { 3035 switch (wr->opcode) {
3036 case IB_WR_SEND_WITH_IMM: 3036 case IB_WR_SEND_WITH_IMM:
3037 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3038 rc = -EINVAL;
3039 *bad_wr = wr;
3040 break;
3041 }
3037 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; 3042 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3038 swqe = (struct rdma_sq_send_wqe_1st *)wqe; 3043 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
3039 swqe->wqe_size = 2; 3044 swqe->wqe_size = 2;
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
3075 break; 3080 break;
3076 3081
3077 case IB_WR_RDMA_WRITE_WITH_IMM: 3082 case IB_WR_RDMA_WRITE_WITH_IMM:
3083 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3084 rc = -EINVAL;
3085 *bad_wr = wr;
3086 break;
3087 }
3078 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; 3088 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3079 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; 3089 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
3080 3090
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3724{ 3734{
3725 struct qedr_dev *dev = get_qedr_dev(ibcq->device); 3735 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
3726 struct qedr_cq *cq = get_qedr_cq(ibcq); 3736 struct qedr_cq *cq = get_qedr_cq(ibcq);
3727 union rdma_cqe *cqe = cq->latest_cqe; 3737 union rdma_cqe *cqe;
3728 u32 old_cons, new_cons; 3738 u32 old_cons, new_cons;
3729 unsigned long flags; 3739 unsigned long flags;
3730 int update = 0; 3740 int update = 0;
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
3741 return qedr_gsi_poll_cq(ibcq, num_entries, wc); 3751 return qedr_gsi_poll_cq(ibcq, num_entries, wc);
3742 3752
3743 spin_lock_irqsave(&cq->cq_lock, flags); 3753 spin_lock_irqsave(&cq->cq_lock, flags);
3754 cqe = cq->latest_cqe;
3744 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); 3755 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
3745 while (num_entries && is_valid_cqe(cq, cqe)) { 3756 while (num_entries && is_valid_cqe(cq, cqe)) {
3746 struct qedr_qp *qp; 3757 struct qedr_qp *qp;
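The qedr_poll_cq() hunk only moves the cq->latest_cqe read inside cq_lock: the old code sampled it in the declarations, before the lock was held, so a concurrent writer could hand the poll loop a stale pointer. The shape of the fix, with a hypothetical struct:

#include <linux/spinlock.h>

struct demo_cq {
        spinlock_t lock;
        void *latest;           /* updated by another context */
};

/* Hypothetical: snapshot shared state inside the critical section, */
/* not in initializers that run before the lock is taken.           */
static void *cq_snapshot(struct demo_cq *cq)
{
        unsigned long flags;
        void *cur;

        spin_lock_irqsave(&cq->lock, flags);
        cur = cq->latest;
        spin_unlock_irqrestore(&cq->lock, flags);
        return cur;
}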
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 1b2e5362a3ff..cc429b567d0a 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
489 unsigned long timeout; 489 unsigned long timeout;
490 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); 490 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
491 491
492 if (percpu_ref_is_zero(&mr->refcount)) 492 if (mr->lkey) {
493 return 0; 493 /* avoid dma mr */
494 /* avoid dma mr */
495 if (mr->lkey)
496 rvt_dereg_clean_qps(mr); 494 rvt_dereg_clean_qps(mr);
495 /* @mr was indexed on rcu protected @lkey_table */
496 synchronize_rcu();
497 }
498
497 timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); 499 timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
498 if (!timeout) { 500 if (!timeout) {
499 rvt_pr_err(rdi, 501 rvt_pr_err(rdi,
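The rvt_check_refs() hunk adds a synchronize_rcu() once the MR has been unlinked from the RCU-protected lkey table, so no reader can still hold the old pointer when the completion wait begins. The canonical unpublish, grace-period, tear-down sequence, sketched with hypothetical types:

#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_entry {
        struct list_head node;          /* found by RCU readers */
        /* payload ... */
};

/* Hypothetical: unpublish, wait one grace period, then free. */
static void demo_entry_remove(struct demo_entry *e)
{
        list_del_rcu(&e->node); /* new readers can no longer find it */
        synchronize_rcu();      /* wait out readers that already did */
        kfree(e);               /* nobody can still be dereferencing it */
}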
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 1f316d66e6f7..41614c185918 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
218{ 218{
219 struct matrix_keypad *keypad = input_get_drvdata(dev); 219 struct matrix_keypad *keypad = input_get_drvdata(dev);
220 220
221 spin_lock_irq(&keypad->lock);
221 keypad->stopped = true; 222 keypad->stopped = true;
222 mb(); 223 spin_unlock_irq(&keypad->lock);
224
223 flush_work(&keypad->work.work); 225 flush_work(&keypad->work.work);
224 /* 226 /*
225 * matrix_keypad_scan() will leave IRQs enabled; 227 * matrix_keypad_scan() will leave IRQs enabled;
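In matrix_keypad_stop() the bare mb() gives way to the keypad spinlock: the barrier only ordered this CPU's stores, while the interrupt handler inspects 'stopped' under keypad->lock, so taking the same lock both publishes the flag and excludes a handler running mid-update. As a sketch, with a hypothetical struct:

#include <linux/spinlock.h>

struct demo_kp {
        spinlock_t lock;        /* also taken by the IRQ handler */
        bool stopped;
};

/* Hypothetical: serialize against the handler instead of relying */
/* on a one-sided memory barrier.                                  */
static void demo_stop(struct demo_kp *kp)
{
        spin_lock_irq(&kp->lock);
        kp->stopped = true;     /* handler tests this under kp->lock */
        spin_unlock_irq(&kp->lock);
}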
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 3d2e23a0ae39..a246fc686bb7 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -173,7 +173,6 @@ static const char * const smbus_pnp_ids[] = {
173 "LEN0046", /* X250 */ 173 "LEN0046", /* X250 */
174 "LEN004a", /* W541 */ 174 "LEN004a", /* W541 */
175 "LEN200f", /* T450s */ 175 "LEN200f", /* T450s */
176 "LEN2018", /* T460p */
177 NULL 176 NULL
178}; 177};
179 178
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index db4f6bb502e3..a5ab774da4cc 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -1,11 +1,8 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd 2// Melfas MMS114/MMS152 touchscreen device driver
3 * Author: Joonyoung Shim <jy0922.shim@samsung.com> 3//
4 * 4// Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify 5// Author: Joonyoung Shim <jy0922.shim@samsung.com>
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9 6
10#include <linux/module.h> 7#include <linux/module.h>
11#include <linux/delay.h> 8#include <linux/delay.h>
@@ -624,4 +621,4 @@ module_i2c_driver(mms114_driver);
624/* Module information */ 621/* Module information */
625MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); 622MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
626MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver"); 623MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver");
627MODULE_LICENSE("GPL"); 624MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 1d3056f53747..2cbb19cddbf8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1412,7 +1412,7 @@ static struct irq_chip its_irq_chip = {
1412 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. 1412 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
1413 */ 1413 */
1414#define IRQS_PER_CHUNK_SHIFT 5 1414#define IRQS_PER_CHUNK_SHIFT 5
1415#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) 1415#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
1416#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ 1416#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1417 1417
1418static unsigned long *lpi_bitmap; 1418static unsigned long *lpi_bitmap;
@@ -2119,11 +2119,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2119 2119
2120 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2120 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2121 /* 2121 /*
2122 * At least one bit of EventID is being used, hence a minimum 2122 * We allocate at least one chunk worth of LPIs per device,
2123 * of two entries. No, the architecture doesn't let you 2123 * and thus that many ITEs. The device may require fewer though.
2124 * express an ITT with a single entry.
2125 */ 2124 */
2126 nr_ites = max(2UL, roundup_pow_of_two(nvecs)); 2125 nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
2127 sz = nr_ites * its->ite_size; 2126 sz = nr_ites * its->ite_size;
2128 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 2127 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
2129 itt = kzalloc(sz, GFP_KERNEL); 2128 itt = kzalloc(sz, GFP_KERNEL);
@@ -2495,7 +2494,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
2495 2494
2496static void its_vpe_schedule(struct its_vpe *vpe) 2495static void its_vpe_schedule(struct its_vpe *vpe)
2497{ 2496{
2498 void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 2497 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2499 u64 val; 2498 u64 val;
2500 2499
2501 /* Schedule the VPE */ 2500 /* Schedule the VPE */
@@ -2527,7 +2526,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
2527 2526
2528static void its_vpe_deschedule(struct its_vpe *vpe) 2527static void its_vpe_deschedule(struct its_vpe *vpe)
2529{ 2528{
2530 void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 2529 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2531 u32 count = 1000000; /* 1s! */ 2530 u32 count = 1000000; /* 1s! */
2532 bool clean; 2531 bool clean;
2533 u64 val; 2532 u64 val;
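Two of the its.c hunks are pure type fixes: '1 << 5' is an int, and the kernel's max() rejects operands of distinct types, so IRQS_PER_CHUNK becomes 1UL to match the unsigned long returned by roundup_pow_of_two() (the 'void * __iomem' to 'void __iomem *' change is the same kind of slip, for the sparse address-space annotation). Roughly:

#include <linux/kernel.h>
#include <linux/log2.h>

#define CHUNK_SHIFT     5
#define CHUNK_SIZE      (1UL << CHUNK_SHIFT)    /* 1UL, not 1 */

/* Hypothetical; nvecs is assumed >= 1. roundup_pow_of_two()  */
/* yields unsigned long; with a plain (1 << 5) int constant,  */
/* max() would fail its compile-time type check.              */
static unsigned long demo_nr_ites(unsigned int nvecs)
{
        return max(CHUNK_SIZE, roundup_pow_of_two(nvecs));
}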
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 675eda5ff2b8..4760307ab43f 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -28,20 +28,6 @@ struct gpcv2_irqchip_data {
28 28
29static struct gpcv2_irqchip_data *imx_gpcv2_instance; 29static struct gpcv2_irqchip_data *imx_gpcv2_instance;
30 30
31/*
32 * Interface for the low level wakeup code.
33 */
34u32 imx_gpcv2_get_wakeup_source(u32 **sources)
35{
36 if (!imx_gpcv2_instance)
37 return 0;
38
39 if (sources)
40 *sources = imx_gpcv2_instance->wakeup_sources;
41
42 return IMR_NUM;
43}
44
45static int gpcv2_wakeup_source_save(void) 31static int gpcv2_wakeup_source_save(void)
46{ 32{
47 struct gpcv2_irqchip_data *cd; 33 struct gpcv2_irqchip_data *cd;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 4d1d8dfb2d2a..f2273143b3cb 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
963 uint32_t rtime = cpu_to_le32(get_seconds()); 963 uint32_t rtime = cpu_to_le32(get_seconds());
964 struct uuid_entry *u; 964 struct uuid_entry *u;
965 char buf[BDEVNAME_SIZE]; 965 char buf[BDEVNAME_SIZE];
966 struct cached_dev *exist_dc, *t;
966 967
967 bdevname(dc->bdev, buf); 968 bdevname(dc->bdev, buf);
968 969
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
987 return -EINVAL; 988 return -EINVAL;
988 } 989 }
989 990
991 /* Check whether already attached */
992 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
993 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
994 pr_err("Tried to attach %s but duplicate UUID already attached",
995 buf);
996
997 return -EINVAL;
998 }
999 }
1000
990 u = uuid_find(c, dc->sb.uuid); 1001 u = uuid_find(c, dc->sb.uuid);
991 1002
992 if (u && 1003 if (u &&
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
1204 1215
1205 return; 1216 return;
1206err: 1217err:
1207 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1218 pr_notice("error %s: %s", bdevname(bdev, name), err);
1208 bcache_device_stop(&dc->disk); 1219 bcache_device_stop(&dc->disk);
1209} 1220}
1210 1221
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
1883 const char *err = NULL; /* must be set for any error case */ 1894 const char *err = NULL; /* must be set for any error case */
1884 int ret = 0; 1895 int ret = 0;
1885 1896
1897 bdevname(bdev, name);
1898
1886 memcpy(&ca->sb, sb, sizeof(struct cache_sb)); 1899 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
1887 ca->bdev = bdev; 1900 ca->bdev = bdev;
1888 ca->bdev->bd_holder = ca; 1901 ca->bdev->bd_holder = ca;
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
1891 bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; 1904 bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
1892 get_page(sb_page); 1905 get_page(sb_page);
1893 1906
1894 if (blk_queue_discard(bdev_get_queue(ca->bdev))) 1907 if (blk_queue_discard(bdev_get_queue(bdev)))
1895 ca->discard = CACHE_DISCARD(&ca->sb); 1908 ca->discard = CACHE_DISCARD(&ca->sb);
1896 1909
1897 ret = cache_alloc(ca); 1910 ret = cache_alloc(ca);
1898 if (ret != 0) { 1911 if (ret != 0) {
1912 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1899 if (ret == -ENOMEM) 1913 if (ret == -ENOMEM)
1900 err = "cache_alloc(): -ENOMEM"; 1914 err = "cache_alloc(): -ENOMEM";
1901 else 1915 else
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
1918 goto out; 1932 goto out;
1919 } 1933 }
1920 1934
1921 pr_info("registered cache device %s", bdevname(bdev, name)); 1935 pr_info("registered cache device %s", name);
1922 1936
1923out: 1937out:
1924 kobject_put(&ca->kobj); 1938 kobject_put(&ca->kobj);
1925 1939
1926err: 1940err:
1927 if (err) 1941 if (err)
1928 pr_notice("error opening %s: %s", bdevname(bdev, name), err); 1942 pr_notice("error %s: %s", name, err);
1929 1943
1930 return ret; 1944 return ret;
1931} 1945}
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2014 if (err) 2028 if (err)
2015 goto err_close; 2029 goto err_close;
2016 2030
2031 err = "failed to register device";
2017 if (SB_IS_BDEV(sb)) { 2032 if (SB_IS_BDEV(sb)) {
2018 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 2033 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
2019 if (!dc) 2034 if (!dc)
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2028 goto err_close; 2043 goto err_close;
2029 2044
2030 if (register_cache(sb, sb_page, bdev, ca) != 0) 2045 if (register_cache(sb, sb_page, bdev, ca) != 0)
2031 goto err_close; 2046 goto err;
2032 } 2047 }
2033out: 2048out:
2034 if (sb_page) 2049 if (sb_page)
@@ -2041,7 +2056,7 @@ out:
2041err_close: 2056err_close:
2042 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2057 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2043err: 2058err:
2044 pr_info("error opening %s: %s", path, err); 2059 pr_info("error %s: %s", path, err);
2045 ret = -EINVAL; 2060 ret = -EINVAL;
2046 goto out; 2061 goto out;
2047} 2062}
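Several bcache hunks adjust error-path ownership: once register_cache() holds the bdev, its own failure path after cache_alloc() must drop it, and register_bcache() must then take the plain err label rather than err_close, or the device would be put twice. The single-owner rule in the abstract, all names hypothetical:

#include <linux/errno.h>

struct demo_res;
struct demo_res *demo_acquire(void);
int demo_setup(struct demo_res *res);
void demo_release(struct demo_res *res);

/* Hypothetical: once the helper owns the resource, its failure */
/* paths release it, and the caller must not release it again.  */
static int demo_helper(struct demo_res *res)
{
        if (demo_setup(res) < 0) {
                demo_release(res);      /* helper owns res from here on */
                return -ENOMEM;
        }
        return 0;
}

static int demo_caller(void)
{
        struct demo_res *res = demo_acquire();

        if (!res)
                return -ENOMEM;
        if (demo_helper(res) < 0)
                return -ENOMEM; /* no demo_release() here: helper did it */
        demo_release(res);
        return 0;
}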
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 414c9af54ded..aa2032fa80d4 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -386,9 +386,6 @@ static void __cache_size_refresh(void)
386static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, 386static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
387 enum data_mode *data_mode) 387 enum data_mode *data_mode)
388{ 388{
389 unsigned noio_flag;
390 void *ptr;
391
392 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { 389 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
393 *data_mode = DATA_MODE_SLAB; 390 *data_mode = DATA_MODE_SLAB;
394 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); 391 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
412 * all allocations done by this process (including pagetables) are done 409 * all allocations done by this process (including pagetables) are done
413 * as if GFP_NOIO was specified. 410 * as if GFP_NOIO was specified.
414 */ 411 */
412 if (gfp_mask & __GFP_NORETRY) {
413 unsigned noio_flag = memalloc_noio_save();
414 void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
415 415
416 if (gfp_mask & __GFP_NORETRY)
417 noio_flag = memalloc_noio_save();
418
419 ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
420
421 if (gfp_mask & __GFP_NORETRY)
422 memalloc_noio_restore(noio_flag); 416 memalloc_noio_restore(noio_flag);
417 return ptr;
418 }
423 419
424 return ptr; 420 return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
425} 421}
426 422
427/* 423/*
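The alloc_buffer_data() rewrite scopes the NOIO override to the one branch that needs it, rather than testing __GFP_NORETRY twice around an unconditional __vmalloc(). The resulting shape, using the 4.16-era three-argument __vmalloc():

#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

/* Sketch: confine memalloc_noio_save()/restore() to the branch */
/* whose allocations must behave as if GFP_NOIO were set.        */
static void *noretry_vmalloc(size_t size, gfp_t gfp_mask)
{
        if (gfp_mask & __GFP_NORETRY) {
                unsigned noio_flag = memalloc_noio_save();
                void *ptr = __vmalloc(size, gfp_mask, PAGE_KERNEL);

                memalloc_noio_restore(noio_flag);
                return ptr;
        }
        return __vmalloc(size, gfp_mask, PAGE_KERNEL);
}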
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 7d3e572072f5..a05a560d3cba 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -211,29 +211,27 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
211 else 211 else
212 m->queue_mode = DM_TYPE_REQUEST_BASED; 212 m->queue_mode = DM_TYPE_REQUEST_BASED;
213 213
214 } else if (m->queue_mode == DM_TYPE_BIO_BASED || 214 } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
215 m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
216 INIT_WORK(&m->process_queued_bios, process_queued_bios); 215 INIT_WORK(&m->process_queued_bios, process_queued_bios);
217 216 /*
218 if (m->queue_mode == DM_TYPE_BIO_BASED) { 217 * bio-based doesn't support any direct scsi_dh management;
219 /* 218 * it just discovers if a scsi_dh is attached.
220 * bio-based doesn't support any direct scsi_dh management; 219 */
221 * it just discovers if a scsi_dh is attached. 220 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
222 */
223 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
224 }
225 }
226
227 if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
228 set_bit(MPATHF_QUEUE_IO, &m->flags);
229 atomic_set(&m->pg_init_in_progress, 0);
230 atomic_set(&m->pg_init_count, 0);
231 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
232 init_waitqueue_head(&m->pg_init_wait);
233 } 221 }
234 222
235 dm_table_set_type(ti->table, m->queue_mode); 223 dm_table_set_type(ti->table, m->queue_mode);
236 224
225 /*
226 * Init fields that are only used when a scsi_dh is attached
227 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
228 */
229 set_bit(MPATHF_QUEUE_IO, &m->flags);
230 atomic_set(&m->pg_init_in_progress, 0);
231 atomic_set(&m->pg_init_count, 0);
232 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
233 init_waitqueue_head(&m->pg_init_wait);
234
237 return 0; 235 return 0;
238} 236}
239 237
@@ -337,9 +335,6 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
337{ 335{
338 m->current_pg = pg; 336 m->current_pg = pg;
339 337
340 if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
341 return;
342
343 /* Must we initialise the PG first, and queue I/O till it's ready? */ 338 /* Must we initialise the PG first, and queue I/O till it's ready? */
344 if (m->hw_handler_name) { 339 if (m->hw_handler_name) {
345 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); 340 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
@@ -385,8 +380,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
385 unsigned bypassed = 1; 380 unsigned bypassed = 1;
386 381
387 if (!atomic_read(&m->nr_valid_paths)) { 382 if (!atomic_read(&m->nr_valid_paths)) {
388 if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) 383 clear_bit(MPATHF_QUEUE_IO, &m->flags);
389 clear_bit(MPATHF_QUEUE_IO, &m->flags);
390 goto failed; 384 goto failed;
391 } 385 }
392 386
@@ -599,7 +593,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
599 return pgpath; 593 return pgpath;
600} 594}
601 595
602static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio) 596static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio)
603{ 597{
604 struct pgpath *pgpath; 598 struct pgpath *pgpath;
605 unsigned long flags; 599 unsigned long flags;
@@ -634,8 +628,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
634{ 628{
635 struct pgpath *pgpath; 629 struct pgpath *pgpath;
636 630
637 if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) 631 if (!m->hw_handler_name)
638 pgpath = __map_bio_nvme(m, bio); 632 pgpath = __map_bio_fast(m, bio);
639 else 633 else
640 pgpath = __map_bio(m, bio); 634 pgpath = __map_bio(m, bio);
641 635
@@ -675,8 +669,7 @@ static void process_queued_io_list(struct multipath *m)
675{ 669{
676 if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) 670 if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
677 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); 671 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
678 else if (m->queue_mode == DM_TYPE_BIO_BASED || 672 else if (m->queue_mode == DM_TYPE_BIO_BASED)
679 m->queue_mode == DM_TYPE_NVME_BIO_BASED)
680 queue_work(kmultipathd, &m->process_queued_bios); 673 queue_work(kmultipathd, &m->process_queued_bios);
681} 674}
682 675
@@ -811,15 +804,14 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
811 return 0; 804 return 0;
812} 805}
813 806
814static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error) 807static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
808 const char *attached_handler_name, char **error)
815{ 809{
816 struct request_queue *q = bdev_get_queue(bdev); 810 struct request_queue *q = bdev_get_queue(bdev);
817 const char *attached_handler_name;
818 int r; 811 int r;
819 812
820 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) { 813 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
821retain: 814retain:
822 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
823 if (attached_handler_name) { 815 if (attached_handler_name) {
824 /* 816 /*
825 * Clear any hw_handler_params associated with a 817 * Clear any hw_handler_params associated with a
@@ -873,6 +865,8 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
873 int r; 865 int r;
874 struct pgpath *p; 866 struct pgpath *p;
875 struct multipath *m = ti->private; 867 struct multipath *m = ti->private;
868 struct request_queue *q;
869 const char *attached_handler_name;
876 870
877 /* we need at least a path arg */ 871 /* we need at least a path arg */
878 if (as->argc < 1) { 872 if (as->argc < 1) {
@@ -891,9 +885,11 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
891 goto bad; 885 goto bad;
892 } 886 }
893 887
894 if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) { 888 q = bdev_get_queue(p->path.dev->bdev);
889 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
890 if (attached_handler_name) {
895 INIT_DELAYED_WORK(&p->activate_path, activate_path_work); 891 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
896 r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error); 892 r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
897 if (r) { 893 if (r) {
898 dm_put_device(ti, p->path.dev); 894 dm_put_device(ti, p->path.dev);
899 goto bad; 895 goto bad;
@@ -1001,8 +997,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
1001 if (!hw_argc) 997 if (!hw_argc)
1002 return 0; 998 return 0;
1003 999
1004 if (m->queue_mode == DM_TYPE_BIO_BASED || 1000 if (m->queue_mode == DM_TYPE_BIO_BASED) {
1005 m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
1006 dm_consume_args(as, hw_argc); 1001 dm_consume_args(as, hw_argc);
1007 DMERR("bio-based multipath doesn't allow hardware handler args"); 1002 DMERR("bio-based multipath doesn't allow hardware handler args");
1008 return 0; 1003 return 0;
@@ -1091,8 +1086,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
1091 1086
1092 if (!strcasecmp(queue_mode_name, "bio")) 1087 if (!strcasecmp(queue_mode_name, "bio"))
1093 m->queue_mode = DM_TYPE_BIO_BASED; 1088 m->queue_mode = DM_TYPE_BIO_BASED;
1094 else if (!strcasecmp(queue_mode_name, "nvme"))
1095 m->queue_mode = DM_TYPE_NVME_BIO_BASED;
1096 else if (!strcasecmp(queue_mode_name, "rq")) 1089 else if (!strcasecmp(queue_mode_name, "rq"))
1097 m->queue_mode = DM_TYPE_REQUEST_BASED; 1090 m->queue_mode = DM_TYPE_REQUEST_BASED;
1098 else if (!strcasecmp(queue_mode_name, "mq")) 1091 else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1193,7 +1186,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
1193 ti->num_discard_bios = 1; 1186 ti->num_discard_bios = 1;
1194 ti->num_write_same_bios = 1; 1187 ti->num_write_same_bios = 1;
1195 ti->num_write_zeroes_bios = 1; 1188 ti->num_write_zeroes_bios = 1;
1196 if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED) 1189 if (m->queue_mode == DM_TYPE_BIO_BASED)
1197 ti->per_io_data_size = multipath_per_bio_data_size(); 1190 ti->per_io_data_size = multipath_per_bio_data_size();
1198 else 1191 else
1199 ti->per_io_data_size = sizeof(struct dm_mpath_io); 1192 ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1730,9 +1723,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
1730 case DM_TYPE_BIO_BASED: 1723 case DM_TYPE_BIO_BASED:
1731 DMEMIT("queue_mode bio "); 1724 DMEMIT("queue_mode bio ");
1732 break; 1725 break;
1733 case DM_TYPE_NVME_BIO_BASED:
1734 DMEMIT("queue_mode nvme ");
1735 break;
1736 case DM_TYPE_MQ_REQUEST_BASED: 1726 case DM_TYPE_MQ_REQUEST_BASED:
1737 DMEMIT("queue_mode mq "); 1727 DMEMIT("queue_mode mq ");
1738 break; 1728 break;
@@ -2030,8 +2020,9 @@ static int multipath_busy(struct dm_target *ti)
2030 *---------------------------------------------------------------*/ 2020 *---------------------------------------------------------------*/
2031static struct target_type multipath_target = { 2021static struct target_type multipath_target = {
2032 .name = "multipath", 2022 .name = "multipath",
2033 .version = {1, 12, 0}, 2023 .version = {1, 13, 0},
2034 .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, 2024 .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2025 DM_TARGET_PASSES_INTEGRITY,
2035 .module = THIS_MODULE, 2026 .module = THIS_MODULE,
2036 .ctr = multipath_ctr, 2027 .ctr = multipath_ctr,
2037 .dtr = multipath_dtr, 2028 .dtr = multipath_dtr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 7ef469e902c6..c1d1034ff7b7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3408 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3408 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3409 3409
3410 } else { 3410 } else {
3411 if (test_bit(MD_RECOVERY_NEEDED, &recovery) || 3411 if (!test_bit(MD_RECOVERY_INTR, &recovery) &&
3412 test_bit(MD_RECOVERY_RESHAPE, &recovery) || 3412 (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
3413 test_bit(MD_RECOVERY_RUNNING, &recovery)) 3413 test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
3414 test_bit(MD_RECOVERY_RUNNING, &recovery)))
3414 r = mddev->curr_resync_completed; 3415 r = mddev->curr_resync_completed;
3415 else 3416 else
3416 r = mddev->recovery_cp; 3417 r = mddev->recovery_cp;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5fe7ec356c33..7eb3e2a3c07d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t)
942 942
943 if (t->type != DM_TYPE_NONE) { 943 if (t->type != DM_TYPE_NONE) {
944 /* target already set the table's type */ 944 /* target already set the table's type */
945 if (t->type == DM_TYPE_BIO_BASED) 945 if (t->type == DM_TYPE_BIO_BASED) {
946 return 0; 946 /* possibly upgrade to a variant of bio-based */
947 else if (t->type == DM_TYPE_NVME_BIO_BASED) { 947 goto verify_bio_based;
948 if (!dm_table_does_not_support_partial_completion(t)) {
949 DMERR("nvme bio-based is only possible with devices"
950 " that don't support partial completion");
951 return -EINVAL;
952 }
953 /* Fallthru, also verify all devices are blk-mq */
954 } 948 }
955 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); 949 BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
950 BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
956 goto verify_rq_based; 951 goto verify_rq_based;
957 } 952 }
958 953
@@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t)
985 } 980 }
986 981
987 if (bio_based) { 982 if (bio_based) {
983verify_bio_based:
988 /* We must use this table as bio-based */ 984 /* We must use this table as bio-based */
989 t->type = DM_TYPE_BIO_BASED; 985 t->type = DM_TYPE_BIO_BASED;
990 if (dm_table_supports_dax(t) || 986 if (dm_table_supports_dax(t) ||
@@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev
1755 char b[BDEVNAME_SIZE]; 1751 char b[BDEVNAME_SIZE];
1756 1752
1757 /* For now, NVMe devices are the only devices of this class */ 1753 /* For now, NVMe devices are the only devices of this class */
1758 return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0); 1754 return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
1759} 1755}
1760 1756
1761static bool dm_table_does_not_support_partial_completion(struct dm_table *t) 1757static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
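The one-character dm-table fix is the classic strncmp() prefix-length bug: comparing only 3 bytes of "nvme" matches any name starting with "nvm". A standalone demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* length 3 only compares "nvm", so "nvm0p1" matches too */
        printf("%d\n", strncmp("nvm0p1", "nvme", 3) == 0);      /* 1 */
        /* length 4 covers the whole prefix being tested */
        printf("%d\n", strncmp("nvm0p1", "nvme", 4) == 0);      /* 0 */
        printf("%d\n", strncmp("nvme0n1", "nvme", 4) == 0);     /* 1 */
        return 0;
}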
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68136806d365..45328d8b2859 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
458 return dm_get_geometry(md, geo); 458 return dm_get_geometry(md, geo);
459} 459}
460 460
461static int dm_grab_bdev_for_ioctl(struct mapped_device *md, 461static char *_dm_claim_ptr = "I belong to device-mapper";
462 struct block_device **bdev, 462
463 fmode_t *mode) 463static int dm_get_bdev_for_ioctl(struct mapped_device *md,
464 struct block_device **bdev,
465 fmode_t *mode)
464{ 466{
465 struct dm_target *tgt; 467 struct dm_target *tgt;
466 struct dm_table *map; 468 struct dm_table *map;
@@ -490,6 +492,10 @@ retry:
490 goto out; 492 goto out;
491 493
492 bdgrab(*bdev); 494 bdgrab(*bdev);
495 r = blkdev_get(*bdev, *mode, _dm_claim_ptr);
496 if (r < 0)
497 goto out;
498
493 dm_put_live_table(md, srcu_idx); 499 dm_put_live_table(md, srcu_idx);
494 return r; 500 return r;
495 501
@@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
508 struct mapped_device *md = bdev->bd_disk->private_data; 514 struct mapped_device *md = bdev->bd_disk->private_data;
509 int r; 515 int r;
510 516
511 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 517 r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
512 if (r < 0) 518 if (r < 0)
513 return r; 519 return r;
514 520
@@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
528 534
529 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 535 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
530out: 536out:
531 bdput(bdev); 537 blkdev_put(bdev, mode);
532 return r; 538 return r;
533} 539}
534 540
@@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
708static int open_table_device(struct table_device *td, dev_t dev, 714static int open_table_device(struct table_device *td, dev_t dev,
709 struct mapped_device *md) 715 struct mapped_device *md)
710{ 716{
711 static char *_claim_ptr = "I belong to device-mapper";
712 struct block_device *bdev; 717 struct block_device *bdev;
713 718
714 int r; 719 int r;
715 720
716 BUG_ON(td->dm_dev.bdev); 721 BUG_ON(td->dm_dev.bdev);
717 722
718 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); 723 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
719 if (IS_ERR(bdev)) 724 if (IS_ERR(bdev))
720 return PTR_ERR(bdev); 725 return PTR_ERR(bdev);
721 726
@@ -3011,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3011 fmode_t mode; 3016 fmode_t mode;
3012 int r; 3017 int r;
3013 3018
3014 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3019 r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
3015 if (r < 0) 3020 if (r < 0)
3016 return r; 3021 return r;
3017 3022
@@ -3021,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3021 else 3026 else
3022 r = -EOPNOTSUPP; 3027 r = -EOPNOTSUPP;
3023 3028
3024 bdput(bdev); 3029 blkdev_put(bdev, mode);
3025 return r; 3030 return r;
3026} 3031}
3027 3032
@@ -3032,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3032 fmode_t mode; 3037 fmode_t mode;
3033 int r; 3038 int r;
3034 3039
3035 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3040 r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
3036 if (r < 0) 3041 if (r < 0)
3037 return r; 3042 return r;
3038 3043
@@ -3042,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3042 else 3047 else
3043 r = -EOPNOTSUPP; 3048 r = -EOPNOTSUPP;
3044 3049
3045 bdput(bdev); 3050 blkdev_put(bdev, mode);
3046 return r; 3051 return r;
3047} 3052}
3048 3053
@@ -3054,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3054 fmode_t mode; 3059 fmode_t mode;
3055 int r; 3060 int r;
3056 3061
3057 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3062 r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
3058 if (r < 0) 3063 if (r < 0)
3059 return r; 3064 return r;
3060 3065
@@ -3064,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3064 else 3069 else
3065 r = -EOPNOTSUPP; 3070 r = -EOPNOTSUPP;
3066 3071
3067 bdput(bdev); 3072 blkdev_put(bdev, mode);
3068 return r; 3073 return r;
3069} 3074}
3070 3075
@@ -3075,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
3075 fmode_t mode; 3080 fmode_t mode;
3076 int r; 3081 int r;
3077 3082
3078 r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); 3083 r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
3079 if (r < 0) 3084 if (r < 0)
3080 return r; 3085 return r;
3081 3086
@@ -3085,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
3085 else 3090 else
3086 r = -EOPNOTSUPP; 3091 r = -EOPNOTSUPP;
3087 3092
3088 bdput(bdev); 3093 blkdev_put(bdev, mode);
3089 return r; 3094 return r;
3090} 3095}
3091 3096
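The dm.c hunks pair each grab of the target bdev with a real blkdev_get()/blkdev_put() under a shared claim pointer; the bdgrab()/bdput() calls they replace only pinned the inode and never took or released an open reference. The intended pairing, sketched:

#include <linux/err.h>
#include <linux/fs.h>

static char *demo_claim_ptr = "demo holder";    /* hypothetical holder tag */

/* Sketch: undo blkdev_get_by_dev() with blkdev_put() using the */
/* same mode, FMODE_EXCL included.                               */
static int demo_with_bdev(dev_t dev, fmode_t mode)
{
        struct block_device *bdev;

        bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, demo_claim_ptr);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        /* ... issue the ioctl or persistent-reservation command ... */
        blkdev_put(bdev, mode | FMODE_EXCL);
        return 0;
}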
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index 337462e1569f..038509e5d031 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -102,10 +102,32 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,
102 return rc; 102 return rc;
103} 103}
104 104
105static long afu_ioctl_get_metadata(struct ocxl_context *ctx,
106 struct ocxl_ioctl_metadata __user *uarg)
107{
108 struct ocxl_ioctl_metadata arg;
109
110 memset(&arg, 0, sizeof(arg));
111
112 arg.version = 0;
113
114 arg.afu_version_major = ctx->afu->config.version_major;
115 arg.afu_version_minor = ctx->afu->config.version_minor;
116 arg.pasid = ctx->pasid;
117 arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride;
118 arg.global_mmio_size = ctx->afu->config.global_mmio_size;
119
120 if (copy_to_user(uarg, &arg, sizeof(arg)))
121 return -EFAULT;
122
123 return 0;
124}
125
105#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ 126#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \
106 x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ 127 x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \
107 x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ 128 x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \
108 x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ 129 x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \
130 x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \
109 "UNKNOWN") 131 "UNKNOWN")
110 132
111static long afu_ioctl(struct file *file, unsigned int cmd, 133static long afu_ioctl(struct file *file, unsigned int cmd,
@@ -159,6 +181,11 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
159 irq_fd.eventfd); 181 irq_fd.eventfd);
160 break; 182 break;
161 183
184 case OCXL_IOCTL_GET_METADATA:
185 rc = afu_ioctl_get_metadata(ctx,
186 (struct ocxl_ioctl_metadata __user *) args);
187 break;
188
162 default: 189 default:
163 rc = -EINVAL; 190 rc = -EINVAL;
164 } 191 }
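afu_ioctl_get_metadata() zeroes the whole struct before filling it and copying it out; without the memset(), compiler-inserted padding could carry stale kernel stack bytes to userspace. A sketch with a hypothetical layout:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_meta {
        __u16 version;
        __u8  major;
        __u8  minor;
        __u32 pasid;
        __u64 mmio_size;        /* padding may surround 64-bit members */
};

/* Sketch: clear everything, padding included, before copy_to_user(). */
static long demo_fill_meta(struct demo_meta __user *uarg)
{
        struct demo_meta arg;

        memset(&arg, 0, sizeof(arg));
        arg.version = 0;
        /* ... fill the remaining fields from driver state ... */
        if (copy_to_user(uarg, &arg, sizeof(arg)))
                return -EFAULT;
        return 0;
}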
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 20135a5de748..2cfb963d9f37 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -72,6 +72,7 @@ MODULE_ALIAS("mmc:block");
72#define MMC_BLK_TIMEOUT_MS (10 * 1000) 72#define MMC_BLK_TIMEOUT_MS (10 * 1000)
73#define MMC_SANITIZE_REQ_TIMEOUT 240000 73#define MMC_SANITIZE_REQ_TIMEOUT 240000
74#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 74#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
75#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
75 76
76#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ 77#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
77 (rq_data_dir(req) == WRITE)) 78 (rq_data_dir(req) == WRITE))
@@ -587,6 +588,24 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
587 } 588 }
588 589
589 /* 590 /*
591 * Make sure the cache of the PARTITION_CONFIG register and
592 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
593 * changed it successfully.
594 */
595 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
596 (cmd.opcode == MMC_SWITCH)) {
597 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
598 u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);
599
600 /*
601 * Update cache so the next mmc_blk_part_switch call operates
602 * on up-to-date data.
603 */
604 card->ext_csd.part_config = value;
605 main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
606 }
607
608 /*
590 * According to the SD specs, some commands require a delay after 609 * According to the SD specs, some commands require a delay after
591 * issuing the command. 610 * issuing the command.
592 */ 611 */
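MMC_EXTRACT_VALUE_FROM_ARG() mirrors the existing index macro: an MMC_SWITCH argument packs access in bits [25:24], index in [23:16], value in [15:8] and cmd_set in [2:0], and the new hunk needs the value byte to refresh the cached PARTITION_CONFIG. A standalone check of the decoding:

#include <stdint.h>
#include <stdio.h>

#define MMC_EXTRACT_INDEX_FROM_ARG(x)   (((x) & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x)   (((x) & 0x0000FF00) >> 8)

int main(void)
{
        /* write-byte SWITCH to EXT_CSD_PART_CONFIG (179) with value 1 */
        uint32_t arg = (3u << 24) | (179u << 16) | (1u << 8);

        printf("index=%u value=%u\n",
               (unsigned)MMC_EXTRACT_INDEX_FROM_ARG(arg),
               (unsigned)MMC_EXTRACT_VALUE_FROM_ARG(arg));      /* 179 1 */
        return 0;
}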
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 79a5b985ccf5..9c821eedd156 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -82,6 +82,7 @@ struct mmc_fixup {
82#define CID_MANFID_APACER 0x27 82#define CID_MANFID_APACER 0x27
83#define CID_MANFID_KINGSTON 0x70 83#define CID_MANFID_KINGSTON 0x70
84#define CID_MANFID_HYNIX 0x90 84#define CID_MANFID_HYNIX 0x90
85#define CID_MANFID_NUMONYX 0xFE
85 86
86#define END_FIXUP { NULL } 87#define END_FIXUP { NULL }
87 88
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 75d317623852..5153577754f0 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -109,6 +109,12 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
109 */ 109 */
110 MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, 110 MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
111 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5), 111 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
112 /*
113 * Certain Micron (Numonyx) eMMC 4.5 cards might get broken when the HPI
114 * feature is used, so disable the HPI feature for such buggy cards.
115 */
116 MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX,
117 0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6),
112 118
113 END_FIXUP 119 END_FIXUP
114}; 120};
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index fa41d9422d57..a84aa3f1ae85 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -165,9 +165,15 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
165static int dw_mci_exynos_runtime_resume(struct device *dev) 165static int dw_mci_exynos_runtime_resume(struct device *dev)
166{ 166{
167 struct dw_mci *host = dev_get_drvdata(dev); 167 struct dw_mci *host = dev_get_drvdata(dev);
168 int ret;
169
170 ret = dw_mci_runtime_resume(dev);
171 if (ret)
172 return ret;
168 173
169 dw_mci_exynos_config_smu(host); 174 dw_mci_exynos_config_smu(host);
170 return dw_mci_runtime_resume(dev); 175
176 return ret;
171} 177}
172 178
173/** 179/**
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index d9b4acefed31..06d47414d0c1 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -413,7 +413,9 @@ static inline void dw_mci_set_cto(struct dw_mci *host)
413 cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 413 cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
414 if (cto_div == 0) 414 if (cto_div == 0)
415 cto_div = 1; 415 cto_div = 1;
416 cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); 416
417 cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
418 host->bus_hz);
417 419
418 /* add a bit spare time */ 420 /* add a bit spare time */
419 cto_ms += 10; 421 cto_ms += 10;
@@ -562,6 +564,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
562 (sizeof(struct idmac_desc_64addr) * 564 (sizeof(struct idmac_desc_64addr) *
563 (i + 1))) >> 32; 565 (i + 1))) >> 32;
564 /* Initialize reserved and buffer size fields to "0" */ 566 /* Initialize reserved and buffer size fields to "0" */
567 p->des0 = 0;
565 p->des1 = 0; 568 p->des1 = 0;
566 p->des2 = 0; 569 p->des2 = 0;
567 p->des3 = 0; 570 p->des3 = 0;
@@ -584,6 +587,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
584 i++, p++) { 587 i++, p++) {
585 p->des3 = cpu_to_le32(host->sg_dma + 588 p->des3 = cpu_to_le32(host->sg_dma +
586 (sizeof(struct idmac_desc) * (i + 1))); 589 (sizeof(struct idmac_desc) * (i + 1)));
590 p->des0 = 0;
587 p->des1 = 0; 591 p->des1 = 0;
588 } 592 }
589 593
@@ -1799,8 +1803,8 @@ static bool dw_mci_reset(struct dw_mci *host)
1799 } 1803 }
1800 1804
1801 if (host->use_dma == TRANS_MODE_IDMAC) 1805 if (host->use_dma == TRANS_MODE_IDMAC)
1802 /* It is also recommended that we reset and reprogram idmac */ 1806 /* It is also required that we reinit idmac */
1803 dw_mci_idmac_reset(host); 1807 dw_mci_idmac_init(host);
1804 1808
1805 ret = true; 1809 ret = true;
1806 1810
@@ -1948,8 +1952,9 @@ static void dw_mci_set_drto(struct dw_mci *host)
1948 drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 1952 drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
1949 if (drto_div == 0) 1953 if (drto_div == 0)
1950 drto_div = 1; 1954 drto_div = 1;
1951 drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div, 1955
1952 host->bus_hz); 1956 drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
1957 host->bus_hz);
1953 1958
1954 /* add a bit spare time */ 1959 /* add a bit spare time */
1955 drto_ms += 10; 1960 drto_ms += 10;
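Both dw_mmc timeout hunks fix the same 32-bit overflow: MSEC_PER_SEC * clks * div was evaluated in native integer width and could wrap for slow clocks or large dividers, so one operand is promoted to u64 and the division goes through DIV_ROUND_UP_ULL(). In isolation:

#include <linux/kernel.h>
#include <linux/ktime.h>

/* Sketch: promote before multiplying, then divide with the 64-bit helper. */
static unsigned int demo_timeout_ms(u32 clks, u32 div, u32 bus_hz)
{
        return DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * clks * div, bus_hz);
}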
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 4065da58789d..32321bd596d8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -680,7 +680,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
680 host->hw_name = "ACPI"; 680 host->hw_name = "ACPI";
681 host->ops = &sdhci_acpi_ops_dflt; 681 host->ops = &sdhci_acpi_ops_dflt;
682 host->irq = platform_get_irq(pdev, 0); 682 host->irq = platform_get_irq(pdev, 0);
683 if (host->irq <= 0) { 683 if (host->irq < 0) {
684 err = -EINVAL; 684 err = -EINVAL;
685 goto err_free; 685 goto err_free;
686 } 686 }
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 1e37313054f3..6da69af103e6 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
390 return 0; 390 return 0;
391} 391}
392 392
393static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) 393static void cc770_tx(struct net_device *dev, int mo)
394{ 394{
395 struct cc770_priv *priv = netdev_priv(dev); 395 struct cc770_priv *priv = netdev_priv(dev);
396 struct net_device_stats *stats = &dev->stats; 396 struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
397 struct can_frame *cf = (struct can_frame *)skb->data;
398 unsigned int mo = obj2msgobj(CC770_OBJ_TX);
399 u8 dlc, rtr; 397 u8 dlc, rtr;
400 u32 id; 398 u32 id;
401 int i; 399 int i;
402 400
403 if (can_dropped_invalid_skb(dev, skb))
404 return NETDEV_TX_OK;
405
406 if ((cc770_read_reg(priv,
407 msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
408 netdev_err(dev, "TX register is still occupied!\n");
409 return NETDEV_TX_BUSY;
410 }
411
412 netif_stop_queue(dev);
413
414 dlc = cf->can_dlc; 401 dlc = cf->can_dlc;
415 id = cf->can_id; 402 id = cf->can_id;
416 if (cf->can_id & CAN_RTR_FLAG) 403 rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
417 rtr = 0; 404
418 else 405 cc770_write_reg(priv, msgobj[mo].ctrl0,
419 rtr = MSGCFG_DIR; 406 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
420 cc770_write_reg(priv, msgobj[mo].ctrl1, 407 cc770_write_reg(priv, msgobj[mo].ctrl1,
421 RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); 408 RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
422 cc770_write_reg(priv, msgobj[mo].ctrl0, 409
423 MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
424 if (id & CAN_EFF_FLAG) { 410 if (id & CAN_EFF_FLAG) {
425 id &= CAN_EFF_MASK; 411 id &= CAN_EFF_MASK;
426 cc770_write_reg(priv, msgobj[mo].config, 412 cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
439 for (i = 0; i < dlc; i++) 425 for (i = 0; i < dlc; i++)
440 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); 426 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
441 427
442 /* Store echo skb before starting the transfer */
443 can_put_echo_skb(skb, dev, 0);
444
445 cc770_write_reg(priv, msgobj[mo].ctrl1, 428 cc770_write_reg(priv, msgobj[mo].ctrl1,
446 RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); 429 RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
430 cc770_write_reg(priv, msgobj[mo].ctrl0,
431 MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
432}
447 433
448 stats->tx_bytes += dlc; 434static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
435{
436 struct cc770_priv *priv = netdev_priv(dev);
437 unsigned int mo = obj2msgobj(CC770_OBJ_TX);
449 438
439 if (can_dropped_invalid_skb(dev, skb))
440 return NETDEV_TX_OK;
450 441
451 /* 442 netif_stop_queue(dev);
452 * HM: We had some cases of repeated IRQs so make sure the 443
453 * INT is acknowledged I know it's already further up, but 444 if ((cc770_read_reg(priv,
454 * doing again fixed the issue 445 msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
455 */ 446 netdev_err(dev, "TX register is still occupied!\n");
456 cc770_write_reg(priv, msgobj[mo].ctrl0, 447 return NETDEV_TX_BUSY;
457 MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); 448 }
449
450 priv->tx_skb = skb;
451 cc770_tx(dev, mo);
458 452
459 return NETDEV_TX_OK; 453 return NETDEV_TX_OK;
460} 454}
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
680 struct cc770_priv *priv = netdev_priv(dev); 674 struct cc770_priv *priv = netdev_priv(dev);
681 struct net_device_stats *stats = &dev->stats; 675 struct net_device_stats *stats = &dev->stats;
682 unsigned int mo = obj2msgobj(o); 676 unsigned int mo = obj2msgobj(o);
677 struct can_frame *cf;
678 u8 ctrl1;
679
680 ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
683 681
684 /* Nothing more to send, switch off interrupts */
685 cc770_write_reg(priv, msgobj[mo].ctrl0, 682 cc770_write_reg(priv, msgobj[mo].ctrl0,
686 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); 683 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
687 /* 684 cc770_write_reg(priv, msgobj[mo].ctrl1,
688 * We had some cases of repeated IRQ so make sure the 685 RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
689 * INT is acknowledged 686
687 if (unlikely(!priv->tx_skb)) {
688 netdev_err(dev, "missing tx skb in tx interrupt\n");
689 return;
690 }
691
692 if (unlikely(ctrl1 & MSGLST_SET)) {
693 stats->rx_over_errors++;
694 stats->rx_errors++;
695 }
696
697 /* When the CC770 is sending an RTR message and it receives a regular
698 * message that matches the id of the RTR message, it will overwrite the
699 * outgoing message in the TX register. When this happens we must
700 * process the received message and try to transmit the outgoing skb
701 * again.
690 */ 702 */
691 cc770_write_reg(priv, msgobj[mo].ctrl0, 703 if (unlikely(ctrl1 & NEWDAT_SET)) {
692 MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); 704 cc770_rx(dev, mo, ctrl1);
705 cc770_tx(dev, mo);
706 return;
707 }
693 708
709 cf = (struct can_frame *)priv->tx_skb->data;
710 stats->tx_bytes += cf->can_dlc;
694 stats->tx_packets++; 711 stats->tx_packets++;
712
713 can_put_echo_skb(priv->tx_skb, dev, 0);
695 can_get_echo_skb(dev, 0); 714 can_get_echo_skb(dev, 0);
715 priv->tx_skb = NULL;
716
696 netif_wake_queue(dev); 717 netif_wake_queue(dev);
697} 718}
698 719
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
804 priv->can.do_set_bittiming = cc770_set_bittiming; 825 priv->can.do_set_bittiming = cc770_set_bittiming;
805 priv->can.do_set_mode = cc770_set_mode; 826 priv->can.do_set_mode = cc770_set_mode;
806 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 827 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
828 priv->tx_skb = NULL;
807 829
808 memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); 830 memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
809 831
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index a1739db98d91..95752e1d1283 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -193,6 +193,8 @@ struct cc770_priv {
193 u8 cpu_interface; /* CPU interface register */ 193 u8 cpu_interface; /* CPU interface register */
194 u8 clkout; /* Clock out register */ 194 u8 clkout; /* Clock out register */
195 u8 bus_config; /* Bus configuration register */ 195 u8 bus_config; /* Bus configuration register */
196
197 struct sk_buff *tx_skb;
196}; 198};
197 199
198struct net_device *alloc_cc770dev(int sizeof_priv); 200struct net_device *alloc_cc770dev(int sizeof_priv);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 2772d05ff11c..fedd927ba6ed 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -30,6 +30,7 @@
30#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2) 30#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2)
31#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3) 31#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3)
32#define IFI_CANFD_STCMD_BUSOFF BIT(4) 32#define IFI_CANFD_STCMD_BUSOFF BIT(4)
33#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5)
33#define IFI_CANFD_STCMD_BUSMONITOR BIT(16) 34#define IFI_CANFD_STCMD_BUSMONITOR BIT(16)
34#define IFI_CANFD_STCMD_LOOPBACK BIT(18) 35#define IFI_CANFD_STCMD_LOOPBACK BIT(18)
35#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) 36#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
@@ -52,7 +53,10 @@
52#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) 53#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
53 54
54#define IFI_CANFD_INTERRUPT 0xc 55#define IFI_CANFD_INTERRUPT 0xc
56#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0)
55#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1) 57#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1)
58#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2)
59#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3)
56#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10) 60#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10)
57#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) 61#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
58#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) 62#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
@@ -61,6 +65,10 @@
61#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31)) 65#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31))
62 66
63#define IFI_CANFD_IRQMASK 0x10 67#define IFI_CANFD_IRQMASK 0x10
68#define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0)
69#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1)
70#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2)
71#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3)
64#define IFI_CANFD_IRQMASK_SET_ERR BIT(7) 72#define IFI_CANFD_IRQMASK_SET_ERR BIT(7)
65#define IFI_CANFD_IRQMASK_SET_TS BIT(15) 73#define IFI_CANFD_IRQMASK_SET_TS BIT(15)
66#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16) 74#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16)
@@ -136,6 +144,8 @@
136#define IFI_CANFD_SYSCLOCK 0x50 144#define IFI_CANFD_SYSCLOCK 0x50
137 145
138#define IFI_CANFD_VER 0x54 146#define IFI_CANFD_VER 0x54
147#define IFI_CANFD_VER_REV_MASK 0xff
148#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15
139 149
140#define IFI_CANFD_IP_ID 0x58 150#define IFI_CANFD_IP_ID 0x58
141#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD 151#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD
@@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
220 230
221 if (enable) { 231 if (enable) {
222 enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | 232 enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
223 IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; 233 IFI_CANFD_IRQMASK_RXFIFO_NEMPTY |
234 IFI_CANFD_IRQMASK_ERROR_STATE_CHG |
235 IFI_CANFD_IRQMASK_ERROR_WARNING |
236 IFI_CANFD_IRQMASK_ERROR_BUSOFF;
224 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 237 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
225 enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER; 238 enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
226 } 239 }
@@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
361 return 1; 374 return 1;
362} 375}
363 376
364static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr) 377static int ifi_canfd_handle_lec_err(struct net_device *ndev)
365{ 378{
366 struct ifi_canfd_priv *priv = netdev_priv(ndev); 379 struct ifi_canfd_priv *priv = netdev_priv(ndev);
367 struct net_device_stats *stats = &ndev->stats; 380 struct net_device_stats *stats = &ndev->stats;
368 struct can_frame *cf; 381 struct can_frame *cf;
369 struct sk_buff *skb; 382 struct sk_buff *skb;
383 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
370 const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST | 384 const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
371 IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST | 385 IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
372 IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | 386 IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
@@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
449 463
450 switch (new_state) { 464 switch (new_state) {
451 case CAN_STATE_ERROR_ACTIVE: 465 case CAN_STATE_ERROR_ACTIVE:
466 /* error active state */
467 priv->can.can_stats.error_warning++;
468 priv->can.state = CAN_STATE_ERROR_ACTIVE;
469 break;
470 case CAN_STATE_ERROR_WARNING:
452 /* error warning state */ 471 /* error warning state */
453 priv->can.can_stats.error_warning++; 472 priv->can.can_stats.error_warning++;
454 priv->can.state = CAN_STATE_ERROR_WARNING; 473 priv->can.state = CAN_STATE_ERROR_WARNING;
@@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
477 ifi_canfd_get_berr_counter(ndev, &bec); 496 ifi_canfd_get_berr_counter(ndev, &bec);
478 497
479 switch (new_state) { 498 switch (new_state) {
480 case CAN_STATE_ERROR_ACTIVE: 499 case CAN_STATE_ERROR_WARNING:
481 /* error warning state */ 500 /* error warning state */
482 cf->can_id |= CAN_ERR_CRTL; 501 cf->can_id |= CAN_ERR_CRTL;
483 cf->data[1] = (bec.txerr > bec.rxerr) ? 502 cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
510 return 1; 529 return 1;
511} 530}
512 531
513static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd) 532static int ifi_canfd_handle_state_errors(struct net_device *ndev)
514{ 533{
515 struct ifi_canfd_priv *priv = netdev_priv(ndev); 534 struct ifi_canfd_priv *priv = netdev_priv(ndev);
535 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
516 int work_done = 0; 536 int work_done = 0;
517 u32 isr;
518 537
519 /* 538 if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
520 * The ErrWarn condition is a little special, since the bit is 539 (priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
521 * located in the INTERRUPT register instead of STCMD register. 540 netdev_dbg(ndev, "Error, entered active state\n");
522 */ 541 work_done += ifi_canfd_handle_state_change(ndev,
523 isr = readl(priv->base + IFI_CANFD_INTERRUPT); 542 CAN_STATE_ERROR_ACTIVE);
524 if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) && 543 }
544
545 if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
525 (priv->can.state != CAN_STATE_ERROR_WARNING)) { 546 (priv->can.state != CAN_STATE_ERROR_WARNING)) {
526 /* Clear the interrupt */
527 writel(IFI_CANFD_INTERRUPT_ERROR_WARNING,
528 priv->base + IFI_CANFD_INTERRUPT);
529 netdev_dbg(ndev, "Error, entered warning state\n"); 547 netdev_dbg(ndev, "Error, entered warning state\n");
530 work_done += ifi_canfd_handle_state_change(ndev, 548 work_done += ifi_canfd_handle_state_change(ndev,
531 CAN_STATE_ERROR_WARNING); 549 CAN_STATE_ERROR_WARNING);
@@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
552{ 570{
553 struct net_device *ndev = napi->dev; 571 struct net_device *ndev = napi->dev;
554 struct ifi_canfd_priv *priv = netdev_priv(ndev); 572 struct ifi_canfd_priv *priv = netdev_priv(ndev);
555 const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE |
556 IFI_CANFD_STCMD_BUSOFF;
557 int work_done = 0;
558
559 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
560 u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); 573 u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
561 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); 574 int work_done = 0;
562 575
563 /* Handle bus state changes */ 576 /* Handle bus state changes */
564 if ((stcmd & stcmd_state_mask) || 577 work_done += ifi_canfd_handle_state_errors(ndev);
565 ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0))
566 work_done += ifi_canfd_handle_state_errors(ndev, stcmd);
567 578
568 /* Handle lost messages on RX */ 579 /* Handle lost messages on RX */
569 if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) 580 if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
@@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
571 582
572 /* Handle lec errors on the bus */ 583 /* Handle lec errors on the bus */
573 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 584 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
574 work_done += ifi_canfd_handle_lec_err(ndev, errctr); 585 work_done += ifi_canfd_handle_lec_err(ndev);
575 586
576 /* Handle normal messages on RX */ 587 /* Handle normal messages on RX */
577 if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) 588 if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
@@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
592 struct net_device_stats *stats = &ndev->stats; 603 struct net_device_stats *stats = &ndev->stats;
593 const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | 604 const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
594 IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | 605 IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
606 IFI_CANFD_INTERRUPT_ERROR_COUNTER |
607 IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |
595 IFI_CANFD_INTERRUPT_ERROR_WARNING | 608 IFI_CANFD_INTERRUPT_ERROR_WARNING |
596 IFI_CANFD_INTERRUPT_ERROR_COUNTER; 609 IFI_CANFD_INTERRUPT_ERROR_BUSOFF;
597 const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | 610 const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
598 IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; 611 IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
599 const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ | 612 const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);
600 IFI_CANFD_INTERRUPT_ERROR_WARNING));
601 u32 isr; 613 u32 isr;
602 614
603 isr = readl(priv->base + IFI_CANFD_INTERRUPT); 615 isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
933 struct resource *res; 945 struct resource *res;
934 void __iomem *addr; 946 void __iomem *addr;
935 int irq, ret; 947 int irq, ret;
936 u32 id; 948 u32 id, rev;
937 949
938 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 950 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
939 addr = devm_ioremap_resource(dev, res); 951 addr = devm_ioremap_resource(dev, res);
@@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
947 return -EINVAL; 959 return -EINVAL;
948 } 960 }
949 961
962 rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK;
963 if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) {
964 dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n",
965 rev, IFI_CANFD_VER_REV_MIN_SUPPORTED);
966 return -EINVAL;
967 }
968
950 ndev = alloc_candev(sizeof(*priv), 1); 969 ndev = alloc_candev(sizeof(*priv), 1);
951 if (!ndev) 970 if (!ndev)
952 return -ENOMEM; 971 return -ENOMEM;
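
The ifi_canfd probe change above gates the driver on a minimum IP-core
revision read from the low byte of the VER register. A minimal userspace
sketch of that gate, assuming only the two defines mirrored from the hunk
(the register value fed to main() is invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define VER_REV_MASK          0xffu
    #define VER_REV_MIN_SUPPORTED 0x15u

    /* returns 0 when the core is new enough, -1 otherwise */
    static int check_core_revision(uint32_t ver_reg)
    {
            uint32_t rev = ver_reg & VER_REV_MASK;

            if (rev < VER_REV_MIN_SUPPORTED) {
                    fprintf(stderr, "core too old (rev 0x%02x), need >= 0x%02x\n",
                            rev, VER_REV_MIN_SUPPORTED);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            return check_core_revision(0x00010016u) ? 1 : 0; /* rev 0x16: ok */
    }
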
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2594f7779c6f..b397a33f3d32 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -26,6 +26,7 @@
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/iopoll.h> 27#include <linux/iopoll.h>
28#include <linux/can/dev.h> 28#include <linux/can/dev.h>
29#include <linux/pinctrl/consumer.h>
29 30
30/* napi related */ 31/* napi related */
31#define M_CAN_NAPI_WEIGHT 64 32#define M_CAN_NAPI_WEIGHT 64
@@ -253,7 +254,7 @@ enum m_can_mram_cfg {
253 254
254/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ 255/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
255#define RXFC_FWM_SHIFT 24 256#define RXFC_FWM_SHIFT 24
256#define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT) 257#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT)
257#define RXFC_FS_SHIFT 16 258#define RXFC_FS_SHIFT 16
258#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) 259#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT)
259 260
@@ -1700,6 +1701,8 @@ static __maybe_unused int m_can_suspend(struct device *dev)
1700 m_can_clk_stop(priv); 1701 m_can_clk_stop(priv);
1701 } 1702 }
1702 1703
1704 pinctrl_pm_select_sleep_state(dev);
1705
1703 priv->can.state = CAN_STATE_SLEEPING; 1706 priv->can.state = CAN_STATE_SLEEPING;
1704 1707
1705 return 0; 1708 return 0;
@@ -1710,6 +1713,8 @@ static __maybe_unused int m_can_resume(struct device *dev)
1710 struct net_device *ndev = dev_get_drvdata(dev); 1713 struct net_device *ndev = dev_get_drvdata(dev);
1711 struct m_can_priv *priv = netdev_priv(ndev); 1714 struct m_can_priv *priv = netdev_priv(ndev);
1712 1715
1716 pinctrl_pm_select_default_state(dev);
1717
1713 m_can_init_ram(priv); 1718 m_can_init_ram(priv);
1714 1719
1715 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1720 priv->can.state = CAN_STATE_ERROR_ACTIVE;
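
Two separate things happen in the m_can hunks above: the RXFC_FWM_MASK fix
replaces a stray `<` (less-than) with the intended `<<` shift, and the PM
callbacks gain pinctrl state switching. A kernel-side sketch of the pinctrl
pairing for a hypothetical driver (the foo_* names are invented; the
pinctrl_pm_* helpers are the real consumer API, so this only compiles
in-tree):

    #include <linux/device.h>
    #include <linux/pinctrl/consumer.h>

    static int foo_suspend(struct device *dev)
    {
            /* ... quiesce the controller, stop clocks ... */
            pinctrl_pm_select_sleep_state(dev);   /* park pins for low power */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            pinctrl_pm_select_default_state(dev); /* restore pins before HW init */
            /* ... re-init controller RAM and registers ... */
            return 0;
    }
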
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 55513411a82e..ed8561d4a90f 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
262 262
263 spin_lock_irqsave(&priv->echo_lock, flags); 263 spin_lock_irqsave(&priv->echo_lock, flags);
264 can_get_echo_skb(priv->ndev, msg->client); 264 can_get_echo_skb(priv->ndev, msg->client);
265 spin_unlock_irqrestore(&priv->echo_lock, flags);
266 265
267 /* count bytes of the echo instead of skb */ 266 /* count bytes of the echo instead of skb */
268 stats->tx_bytes += cf_len; 267 stats->tx_bytes += cf_len;
@@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
271 /* restart tx queue (a slot is free) */ 270 /* restart tx queue (a slot is free) */
272 netif_wake_queue(priv->ndev); 271 netif_wake_queue(priv->ndev);
273 272
273 spin_unlock_irqrestore(&priv->echo_lock, flags);
274 return 0; 274 return 0;
275 } 275 }
276 276
@@ -333,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
333 333
334 /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ 334 /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
335 if (pucan_status_is_rx_barrier(msg)) { 335 if (pucan_status_is_rx_barrier(msg)) {
336 unsigned long flags;
337 336
338 if (priv->enable_tx_path) { 337 if (priv->enable_tx_path) {
339 int err = priv->enable_tx_path(priv); 338 int err = priv->enable_tx_path(priv);
@@ -342,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
342 return err; 341 return err;
343 } 342 }
344 343
345 /* restart network queue only if echo skb array is free */ 344 /* start network queue (echo_skb array is empty) */
346 spin_lock_irqsave(&priv->echo_lock, flags); 345 netif_start_queue(ndev);
347
348 if (!priv->can.echo_skb[priv->echo_idx]) {
349 spin_unlock_irqrestore(&priv->echo_lock, flags);
350
351 netif_wake_queue(ndev);
352 } else {
353 spin_unlock_irqrestore(&priv->echo_lock, flags);
354 }
355 346
356 return 0; 347 return 0;
357 } 348 }
@@ -726,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
726 */ 717 */
727 should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); 718 should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);
728 719
729 spin_unlock_irqrestore(&priv->echo_lock, flags);
730
731 /* write the skb on the interface */
732 priv->write_tx_msg(priv, msg);
733
734 /* stop network tx queue if not enough room to save one more msg too */ 720 /* stop network tx queue if not enough room to save one more msg too */
735 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) 721 if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
736 should_stop_tx_queue |= (room_left < 722 should_stop_tx_queue |= (room_left <
@@ -742,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
742 if (should_stop_tx_queue) 728 if (should_stop_tx_queue)
743 netif_stop_queue(ndev); 729 netif_stop_queue(ndev);
744 730
731 spin_unlock_irqrestore(&priv->echo_lock, flags);
732
733 /* write the skb on the interface */
734 priv->write_tx_msg(priv, msg);
735
745 return NETDEV_TX_OK; 736 return NETDEV_TX_OK;
746} 737}
747 738
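
The peak_canfd reordering above widens the echo_lock critical section so
that the "is the next echo slot free?" test and the queue stop/start
decision are made atomically against the completion path, and moves the
actual hardware write to after the unlock. A userspace pthread analogue of
the lost-wakeup hazard being closed (all names are invented):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t echo_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool slot_busy[8];
    static bool queue_stopped;

    static void xmit_path(unsigned int next_slot)
    {
            pthread_mutex_lock(&echo_lock);
            if (slot_busy[next_slot])     /* no room for one more message */
                    queue_stopped = true; /* stop while still holding the lock */
            pthread_mutex_unlock(&echo_lock);
            /* as in the patch, the device write happens after the unlock */
    }

    static void completion_path(unsigned int slot)
    {
            pthread_mutex_lock(&echo_lock);
            slot_busy[slot] = false;
            queue_stopped = false;        /* wake the producer */
            pthread_mutex_unlock(&echo_lock);
    }

    int main(void)
    {
            slot_busy[0] = true;
            xmit_path(0);                 /* stops: slot 0 still busy */
            completion_path(0);           /* frees the slot and wakes */
            return queue_stopped;         /* 0: queue running again */
    }
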
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 788c3464a3b0..3c51a884db87 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg)
349 priv->tx_pages_free++; 349 priv->tx_pages_free++;
350 spin_unlock_irqrestore(&priv->tx_lock, flags); 350 spin_unlock_irqrestore(&priv->tx_lock, flags);
351 351
352 /* wake producer up */ 352 /* wake producer up (only if enough room in echo_skb array) */
353 netif_wake_queue(priv->ucan.ndev); 353 spin_lock_irqsave(&priv->ucan.echo_lock, flags);
354 if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx])
355 netif_wake_queue(priv->ucan.ndev);
356
357 spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);
354 } 358 }
355 359
356 /* re-enable Rx DMA transfer for this CAN */ 360 /* re-enable Rx DMA transfer for this CAN */
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index d040aeb45172..15c2a831edf1 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,7 +1,10 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o 2obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
3bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o 3bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
4obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o 4obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
5ifdef CONFIG_NET_DSA_LOOP
6obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
7endif
5obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o 8obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
6obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o 9obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
7obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o 10obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index cd16067265dd..78616787f2a3 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -814,8 +814,8 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
814 unsigned int i; 814 unsigned int i;
815 815
816 for (i = 0; i < mib_size; i++) 816 for (i = 0; i < mib_size; i++)
817 memcpy(data + i * ETH_GSTRING_LEN, 817 strlcpy(data + i * ETH_GSTRING_LEN,
818 mibs[i].name, ETH_GSTRING_LEN); 818 mibs[i].name, ETH_GSTRING_LEN);
819} 819}
820EXPORT_SYMBOL(b53_get_strings); 820EXPORT_SYMBOL(b53_get_strings);
821 821
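
The b53 change swaps memcpy for strlcpy when filling the fixed-size ethtool
string slots: the MIB names are short literals, so unconditionally copying
ETH_GSTRING_LEN bytes reads past the end of the source string. A userspace
sketch with a local bounded copy (named my_strlcpy because glibc only
recently gained a strlcpy of its own):

    #include <stdio.h>
    #include <string.h>

    #define GSTRING_LEN 32

    /* copies at most size-1 bytes and always NUL-terminates */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (size) {
                    size_t n = len < size - 1 ? len : size - 1;

                    memcpy(dst, src, n); /* never reads past the source NUL */
                    dst[n] = '\0';
            }
            return len;
    }

    int main(void)
    {
            char slot[GSTRING_LEN];

            my_strlcpy(slot, "TxOctets", sizeof(slot));
            puts(slot);
            return 0;
    }
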
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 29c3075bfb05..fdc673484add 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config NET_VENDOR_8390 5config NET_VENDOR_8390
6 bool "National Semi-conductor 8390 devices" 6 bool "National Semiconductor 8390 devices"
7 default y 7 default y
8 depends on NET_VENDOR_NATSEMI 8 depends on NET_VENDOR_NATSEMI
9 ---help--- 9 ---help---
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 0b49f1aeebd3..fc7383106946 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -36,6 +36,8 @@
36#define AQ_CFG_TX_FRAME_MAX (16U * 1024U) 36#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
37#define AQ_CFG_RX_FRAME_MAX (4U * 1024U) 37#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
38 38
39#define AQ_CFG_TX_CLEAN_BUDGET 256U
40
39/* LRO */ 41/* LRO */
40#define AQ_CFG_IS_LRO_DEF 1U 42#define AQ_CFG_IS_LRO_DEF 1U
41 43
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ebbaf63eaf47..c96a92118b8b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -247,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
247 self->ndev->hw_features |= aq_hw_caps->hw_features; 247 self->ndev->hw_features |= aq_hw_caps->hw_features;
248 self->ndev->features = aq_hw_caps->hw_features; 248 self->ndev->features = aq_hw_caps->hw_features;
249 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; 249 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
250 self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
251
250 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; 252 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
251 self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN; 253 self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
252 254
@@ -937,3 +939,23 @@ err_exit:
937out: 939out:
938 return err; 940 return err;
939} 941}
942
943void aq_nic_shutdown(struct aq_nic_s *self)
944{
945 int err = 0;
946
947 if (!self->ndev)
948 return;
949
950 rtnl_lock();
951
952 netif_device_detach(self->ndev);
953
954 err = aq_nic_stop(self);
955 if (err < 0)
956 goto err_exit;
957 aq_nic_deinit(self);
958
959err_exit:
960 rtnl_unlock();
961} \ No newline at end of file
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index d16b0f1a95aa..219b550d1665 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -118,5 +118,6 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
118u32 aq_nic_get_fw_version(struct aq_nic_s *self); 118u32 aq_nic_get_fw_version(struct aq_nic_s *self);
119int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); 119int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
120int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); 120int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
121void aq_nic_shutdown(struct aq_nic_s *self);
121 122
122#endif /* AQ_NIC_H */ 123#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 87c4308b52a7..ecc6306f940f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -323,6 +323,20 @@ static void aq_pci_remove(struct pci_dev *pdev)
323 pci_disable_device(pdev); 323 pci_disable_device(pdev);
324} 324}
325 325
326static void aq_pci_shutdown(struct pci_dev *pdev)
327{
328 struct aq_nic_s *self = pci_get_drvdata(pdev);
329
330 aq_nic_shutdown(self);
331
332 pci_disable_device(pdev);
333
334 if (system_state == SYSTEM_POWER_OFF) {
335 pci_wake_from_d3(pdev, false);
336 pci_set_power_state(pdev, PCI_D3hot);
337 }
338}
339
326static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) 340static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
327{ 341{
328 struct aq_nic_s *self = pci_get_drvdata(pdev); 342 struct aq_nic_s *self = pci_get_drvdata(pdev);
@@ -345,6 +359,7 @@ static struct pci_driver aq_pci_ops = {
345 .remove = aq_pci_remove, 359 .remove = aq_pci_remove,
346 .suspend = aq_pci_suspend, 360 .suspend = aq_pci_suspend,
347 .resume = aq_pci_resume, 361 .resume = aq_pci_resume,
362 .shutdown = aq_pci_shutdown,
348}; 363};
349 364
350module_pci_driver(aq_pci_ops); 365module_pci_driver(aq_pci_ops);
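
The new aq_pci_shutdown above follows the common PCI .shutdown shape:
detach and stop the NIC, disable the device, and drop to D3hot only on a
genuine power-off so that kexec and reboot still find the device in a sane
state. Kernel-side sketch for a hypothetical driver (foo_* is invented; the
pci_* calls and system_state are the real API):

    #include <linux/kernel.h>
    #include <linux/pci.h>

    static void foo_pci_shutdown(struct pci_dev *pdev)
    {
            /* ... detach the netdev, stop traffic, deinit the HW ... */
            pci_disable_device(pdev);

            if (system_state == SYSTEM_POWER_OFF) {
                    pci_wake_from_d3(pdev, false);        /* not a wake source */
                    pci_set_power_state(pdev, PCI_D3hot); /* deepest non-off state */
            }
    }
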
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0be6a11370bb..b5f1f62e8e25 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -136,11 +136,12 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
136 netif_stop_subqueue(ndev, ring->idx); 136 netif_stop_subqueue(ndev, ring->idx);
137} 137}
138 138
139void aq_ring_tx_clean(struct aq_ring_s *self) 139bool aq_ring_tx_clean(struct aq_ring_s *self)
140{ 140{
141 struct device *dev = aq_nic_get_dev(self->aq_nic); 141 struct device *dev = aq_nic_get_dev(self->aq_nic);
142 unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;
142 143
143 for (; self->sw_head != self->hw_head; 144 for (; self->sw_head != self->hw_head && budget--;
144 self->sw_head = aq_ring_next_dx(self, self->sw_head)) { 145 self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
145 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; 146 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
146 147
@@ -167,6 +168,8 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
167 buff->pa = 0U; 168 buff->pa = 0U;
168 buff->eop_index = 0xffffU; 169 buff->eop_index = 0xffffU;
169 } 170 }
171
172 return !!budget;
170} 173}
171 174
172#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) 175#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
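
aq_ring_tx_clean now handles at most AQ_CFG_TX_CLEAN_BUDGET completions per
call and reports whether it fully drained the ring, so a burst of TX
completions can no longer monopolize the poll loop. The loop shape, reduced
to plain C (the struct and the modulo advance are illustrative):

    #include <stdbool.h>

    struct ring {
            unsigned int sw_head; /* next entry to clean */
            unsigned int hw_head; /* last entry completed by the HW */
            unsigned int size;
    };

    /* returns true when the ring was drained within the budget */
    static bool ring_tx_clean(struct ring *r, unsigned int budget)
    {
            for (; r->sw_head != r->hw_head && budget--;
                 r->sw_head = (r->sw_head + 1) % r->size) {
                    /* unmap the DMA buffer, free the skb, count stats ... */
            }
            return !!budget;
    }

    int main(void)
    {
            struct ring r = { .sw_head = 0, .hw_head = 5, .size = 8 };

            return ring_tx_clean(&r, 256) ? 0 : 1; /* drained: exit 0 */
    }
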
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 965fae0fb6e0..ac1329f4051d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -153,7 +153,7 @@ void aq_ring_free(struct aq_ring_s *self);
153void aq_ring_update_queue_state(struct aq_ring_s *ring); 153void aq_ring_update_queue_state(struct aq_ring_s *ring);
154void aq_ring_queue_wake(struct aq_ring_s *ring); 154void aq_ring_queue_wake(struct aq_ring_s *ring);
155void aq_ring_queue_stop(struct aq_ring_s *ring); 155void aq_ring_queue_stop(struct aq_ring_s *ring);
156void aq_ring_tx_clean(struct aq_ring_s *self); 156bool aq_ring_tx_clean(struct aq_ring_s *self);
157int aq_ring_rx_clean(struct aq_ring_s *self, 157int aq_ring_rx_clean(struct aq_ring_s *self,
158 struct napi_struct *napi, 158 struct napi_struct *napi,
159 int *work_done, 159 int *work_done,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f890b8a5a862..d335c334fa56 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -35,12 +35,12 @@ struct aq_vec_s {
35static int aq_vec_poll(struct napi_struct *napi, int budget) 35static int aq_vec_poll(struct napi_struct *napi, int budget)
36{ 36{
37 struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); 37 struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
38 unsigned int sw_tail_old = 0U;
38 struct aq_ring_s *ring = NULL; 39 struct aq_ring_s *ring = NULL;
40 bool was_tx_cleaned = true;
41 unsigned int i = 0U;
39 int work_done = 0; 42 int work_done = 0;
40 int err = 0; 43 int err = 0;
41 unsigned int i = 0U;
42 unsigned int sw_tail_old = 0U;
43 bool was_tx_cleaned = false;
44 44
45 if (!self) { 45 if (!self) {
46 err = -EINVAL; 46 err = -EINVAL;
@@ -57,9 +57,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
57 57
58 if (ring[AQ_VEC_TX_ID].sw_head != 58 if (ring[AQ_VEC_TX_ID].sw_head !=
59 ring[AQ_VEC_TX_ID].hw_head) { 59 ring[AQ_VEC_TX_ID].hw_head) {
60 aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); 60 was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
61 aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); 61 aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
62 was_tx_cleaned = true;
63 } 62 }
64 63
65 err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, 64 err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
@@ -90,7 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
90 } 89 }
91 } 90 }
92 91
93 if (was_tx_cleaned) 92 if (!was_tx_cleaned)
94 work_done = budget; 93 work_done = budget;
95 94
96 if (work_done < budget) { 95 if (work_done < budget) {
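
On the aq_vec side the sense of was_tx_cleaned flips: it now defaults to
true and goes false only when the TX clean ran out of budget, and only in
that case does poll claim the full budget so NAPI keeps it scheduled.
Reduced sketch of that contract (names invented):

    #include <stdbool.h>

    /* returning the full budget keeps NAPI polling; anything less lets
     * the core complete the poll and re-enable interrupts */
    static int poll_result(int budget, int rx_done, bool tx_fully_cleaned)
    {
            int work_done = rx_done;

            if (!tx_fully_cleaned)
                    work_done = budget;
            return work_done;
    }

    int main(void)
    {
            return poll_result(64, 10, false) == 64 ? 0 : 1;
    }
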
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 967f0fd07fcf..d3b847ec7465 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -21,6 +21,10 @@
21 21
22#define HW_ATL_UCP_0X370_REG 0x0370U 22#define HW_ATL_UCP_0X370_REG 0x0370U
23 23
24#define HW_ATL_MIF_CMD 0x0200U
25#define HW_ATL_MIF_ADDR 0x0208U
26#define HW_ATL_MIF_VAL 0x020CU
27
24#define HW_ATL_FW_SM_RAM 0x2U 28#define HW_ATL_FW_SM_RAM 0x2U
25#define HW_ATL_MPI_FW_VERSION 0x18 29#define HW_ATL_MPI_FW_VERSION 0x18
26#define HW_ATL_MPI_CONTROL_ADR 0x0368U 30#define HW_ATL_MPI_CONTROL_ADR 0x0368U
@@ -79,16 +83,15 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
79 83
80static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) 84static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
81{ 85{
86 u32 gsr, val;
82 int k = 0; 87 int k = 0;
83 u32 gsr;
84 88
85 aq_hw_write_reg(self, 0x404, 0x40e1); 89 aq_hw_write_reg(self, 0x404, 0x40e1);
86 AQ_HW_SLEEP(50); 90 AQ_HW_SLEEP(50);
87 91
88 /* Cleanup SPI */ 92 /* Cleanup SPI */
89 aq_hw_write_reg(self, 0x534, 0xA0); 93 val = aq_hw_read_reg(self, 0x53C);
90 aq_hw_write_reg(self, 0x100, 0x9F); 94 aq_hw_write_reg(self, 0x53C, val | 0x10);
91 aq_hw_write_reg(self, 0x100, 0x809F);
92 95
93 gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); 96 gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
94 aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); 97 aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
@@ -97,7 +100,14 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
97 aq_hw_write_reg(self, 0x404, 0x80e0); 100 aq_hw_write_reg(self, 0x404, 0x80e0);
98 aq_hw_write_reg(self, 0x32a8, 0x0); 101 aq_hw_write_reg(self, 0x32a8, 0x0);
99 aq_hw_write_reg(self, 0x520, 0x1); 102 aq_hw_write_reg(self, 0x520, 0x1);
103
104 /* Reset SPI again because of possible interrupted SPI burst */
105 val = aq_hw_read_reg(self, 0x53C);
106 aq_hw_write_reg(self, 0x53C, val | 0x10);
100 AQ_HW_SLEEP(10); 107 AQ_HW_SLEEP(10);
108 /* Clear SPI reset state */
109 aq_hw_write_reg(self, 0x53C, val & ~0x10);
110
101 aq_hw_write_reg(self, 0x404, 0x180e0); 111 aq_hw_write_reg(self, 0x404, 0x180e0);
102 112
103 for (k = 0; k < 1000; k++) { 113 for (k = 0; k < 1000; k++) {
@@ -141,13 +151,15 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
141 aq_pr_err("FW kickstart failed\n"); 151 aq_pr_err("FW kickstart failed\n");
142 return -EIO; 152 return -EIO;
143 } 153 }
154 /* Old FW requires fixed delay after init */
155 AQ_HW_SLEEP(15);
144 156
145 return 0; 157 return 0;
146} 158}
147 159
148static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) 160static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
149{ 161{
150 u32 gsr, rbl_status; 162 u32 gsr, val, rbl_status;
151 int k; 163 int k;
152 164
153 aq_hw_write_reg(self, 0x404, 0x40e1); 165 aq_hw_write_reg(self, 0x404, 0x40e1);
@@ -157,6 +169,10 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
157 /* Alter RBL status */ 169 /* Alter RBL status */
158 aq_hw_write_reg(self, 0x388, 0xDEAD); 170 aq_hw_write_reg(self, 0x388, 0xDEAD);
159 171
172 /* Cleanup SPI */
173 val = aq_hw_read_reg(self, 0x53C);
174 aq_hw_write_reg(self, 0x53C, val | 0x10);
175
160 /* Global software reset*/ 176 /* Global software reset*/
161 hw_atl_rx_rx_reg_res_dis_set(self, 0U); 177 hw_atl_rx_rx_reg_res_dis_set(self, 0U);
162 hw_atl_tx_tx_reg_res_dis_set(self, 0U); 178 hw_atl_tx_tx_reg_res_dis_set(self, 0U);
@@ -204,6 +220,8 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
204 aq_pr_err("FW kickstart failed\n"); 220 aq_pr_err("FW kickstart failed\n");
205 return -EIO; 221 return -EIO;
206 } 222 }
223 /* Old FW requires fixed delay after init */
224 AQ_HW_SLEEP(15);
207 225
208 return 0; 226 return 0;
209} 227}
@@ -255,18 +273,22 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
255 } 273 }
256 } 274 }
257 275
258 aq_hw_write_reg(self, 0x00000208U, a); 276 aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);
259
260 for (++cnt; --cnt;) {
261 u32 i = 0U;
262 277
263 aq_hw_write_reg(self, 0x00000200U, 0x00008000U); 278 for (++cnt; --cnt && !err;) {
279 aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
264 280
265 for (i = 1024U; 281 if (IS_CHIP_FEATURE(REVISION_B1))
266 (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { 282 AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self,
267 } 283 HW_ATL_MIF_ADDR),
284 1, 1000U);
285 else
286 AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self,
287 HW_ATL_MIF_CMD)),
288 1, 1000U);
268 289
269 *(p++) = aq_hw_read_reg(self, 0x0000020CU); 290 *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
291 a += 4;
270 } 292 }
271 293
272 hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); 294 hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
@@ -662,14 +684,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
662 u32 val = hw_atl_reg_glb_mif_id_get(self); 684 u32 val = hw_atl_reg_glb_mif_id_get(self);
663 u32 mif_rev = val & 0xFFU; 685 u32 mif_rev = val & 0xFFU;
664 686
665 if ((3U & mif_rev) == 1U) { 687 if ((0xFU & mif_rev) == 1U) {
666 chip_features |= 688 chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
667 HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
668 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | 689 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
669 HAL_ATLANTIC_UTILS_CHIP_MIPS; 690 HAL_ATLANTIC_UTILS_CHIP_MIPS;
670 } else if ((3U & mif_rev) == 2U) { 691 } else if ((0xFU & mif_rev) == 2U) {
671 chip_features |= 692 chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
672 HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | 693 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
694 HAL_ATLANTIC_UTILS_CHIP_MIPS |
695 HAL_ATLANTIC_UTILS_CHIP_TPO2 |
696 HAL_ATLANTIC_UTILS_CHIP_RPF2;
697 } else if ((0xFU & mif_rev) == 0xAU) {
698 chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
673 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | 699 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
674 HAL_ATLANTIC_UTILS_CHIP_MIPS | 700 HAL_ATLANTIC_UTILS_CHIP_MIPS |
675 HAL_ATLANTIC_UTILS_CHIP_TPO2 | 701 HAL_ATLANTIC_UTILS_CHIP_TPO2 |
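
The hw_atl detection change widens the revision match from two bits to the
whole nibble because the new B1 silicon reports 0xA, and 0xA & 3 == 2 would
have misdetected it as B0. The decode as a standalone sketch (the enum
values are illustrative, not the driver's actual flag bits):

    #include <stdint.h>

    enum { CHIP_A0 = 1 << 0, CHIP_B0 = 1 << 1, CHIP_B1 = 1 << 2 };

    static uint32_t decode_revision(uint32_t mif_rev)
    {
            switch (mif_rev & 0xFu) {
            case 0x1: return CHIP_A0;
            case 0x2: return CHIP_B0;
            case 0xA: return CHIP_B1; /* invisible to a 2-bit mask */
            default:  return 0;
            }
    }

    int main(void)
    {
            return decode_revision(0xAu) == CHIP_B1 ? 0 : 1;
    }
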
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 2c690947910a..cd8f18f39c61 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -161,6 +161,7 @@ struct __packed hw_aq_atl_utils_mbox {
161#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U 161#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
162#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U 162#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
163#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U 163#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
164#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U
164 165
165#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ 166#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
166 self->chip_features) 167 self->chip_features)
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 5265b937677b..a445de6837a6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -13,7 +13,7 @@
13#define NIC_MAJOR_DRIVER_VERSION 2 13#define NIC_MAJOR_DRIVER_VERSION 2
14#define NIC_MINOR_DRIVER_VERSION 0 14#define NIC_MINOR_DRIVER_VERSION 0
15#define NIC_BUILD_DRIVER_VERSION 2 15#define NIC_BUILD_DRIVER_VERSION 2
16#define NIC_REVISION_DRIVER_VERSION 0 16#define NIC_REVISION_DRIVER_VERSION 1
17 17
18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern" 18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
19 19
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 16f9bee992fe..0f6576802607 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
169 /* Optional regulator for PHY */ 169 /* Optional regulator for PHY */
170 priv->regulator = devm_regulator_get_optional(dev, "phy"); 170 priv->regulator = devm_regulator_get_optional(dev, "phy");
171 if (IS_ERR(priv->regulator)) { 171 if (IS_ERR(priv->regulator)) {
172 if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) 172 if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
173 return -EPROBE_DEFER; 173 err = -EPROBE_DEFER;
174 goto out_clk_disable;
175 }
174 dev_err(dev, "no regulator found\n"); 176 dev_err(dev, "no regulator found\n");
175 priv->regulator = NULL; 177 priv->regulator = NULL;
176 } 178 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f15a8fc6dfc9..3fc549b88c43 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
855static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 855static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
856 struct bcm_sysport_tx_ring *ring) 856 struct bcm_sysport_tx_ring *ring)
857{ 857{
858 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
859 unsigned int pkts_compl = 0, bytes_compl = 0; 858 unsigned int pkts_compl = 0, bytes_compl = 0;
860 struct net_device *ndev = priv->netdev; 859 struct net_device *ndev = priv->netdev;
860 unsigned int txbds_processed = 0;
861 struct bcm_sysport_cb *cb; 861 struct bcm_sysport_cb *cb;
862 unsigned int txbds_ready;
863 unsigned int c_index;
862 u32 hw_ind; 864 u32 hw_ind;
863 865
864 /* Clear status before servicing to reduce spurious interrupts */ 866 /* Clear status before servicing to reduce spurious interrupts */
@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
871 /* Compute how many descriptors have been processed since last call */ 873 /* Compute how many descriptors have been processed since last call */
872 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); 874 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
873 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; 875 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
874 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); 876 txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
875
876 last_c_index = ring->c_index;
877 num_tx_cbs = ring->size;
878
879 c_index &= (num_tx_cbs - 1);
880
881 if (c_index >= last_c_index)
882 last_tx_cn = c_index - last_c_index;
883 else
884 last_tx_cn = num_tx_cbs - last_c_index + c_index;
885 877
886 netif_dbg(priv, tx_done, ndev, 878 netif_dbg(priv, tx_done, ndev,
887 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", 879 "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
888 ring->index, c_index, last_tx_cn, last_c_index); 880 ring->index, ring->c_index, c_index, txbds_ready);
889 881
890 while (last_tx_cn-- > 0) { 882 while (txbds_processed < txbds_ready) {
891 cb = ring->cbs + last_c_index; 883 cb = &ring->cbs[ring->clean_index];
892 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); 884 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
893 885
894 ring->desc_count++; 886 ring->desc_count++;
895 last_c_index++; 887 txbds_processed++;
896 last_c_index &= (num_tx_cbs - 1); 888
889 if (likely(ring->clean_index < ring->size - 1))
890 ring->clean_index++;
891 else
892 ring->clean_index = 0;
897 } 893 }
898 894
899 u64_stats_update_begin(&priv->syncp); 895 u64_stats_update_begin(&priv->syncp);
@@ -1394,6 +1390,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1394 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); 1390 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
1395 ring->index = index; 1391 ring->index = index;
1396 ring->size = size; 1392 ring->size = size;
1393 ring->clean_index = 0;
1397 ring->alloc_size = ring->size; 1394 ring->alloc_size = ring->size;
1398 ring->desc_cpu = p; 1395 ring->desc_cpu = p;
1399 ring->desc_count = ring->size; 1396 ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f5a984c1c986..19c91c76e327 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
706 unsigned int desc_count; /* Number of descriptors */ 706 unsigned int desc_count; /* Number of descriptors */
707 unsigned int curr_desc; /* Current descriptor */ 707 unsigned int curr_desc; /* Current descriptor */
708 unsigned int c_index; /* Last consumer index */ 708 unsigned int c_index; /* Last consumer index */
709 unsigned int p_index; /* Current producer index */ 709 unsigned int clean_index; /* Current clean index */
710 struct bcm_sysport_cb *cbs; /* Transmit control blocks */ 710 struct bcm_sysport_cb *cbs; /* Transmit control blocks */
711 struct dma_desc *desc_cpu; /* CPU view of the descriptor */ 711 struct dma_desc *desc_cpu; /* CPU view of the descriptor */
712 struct bcm_sysport_priv *priv; /* private context backpointer */ 712 struct bcm_sysport_priv *priv; /* private context backpointer */
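
The bcmsysport reclaim rewrite takes the completed-descriptor count as a
masked delta in hardware index space, which wraps at RING_CONS_INDEX_MASK
rather than at the ring size, and walks a separate clean_index through the
ring. Userspace sketch of that split (the constants and the inline c_index
update are illustrative); the main() case shows the delta surviving a
hardware-index wrap:

    #define CONS_INDEX_MASK 0xffff

    struct txring {
            unsigned int c_index;     /* last HW consumer index we saw */
            unsigned int clean_index; /* next ring entry to reclaim */
            unsigned int size;
    };

    static unsigned int reclaim(struct txring *r, unsigned int hw_c_index)
    {
            unsigned int ready = (hw_c_index - r->c_index) & CONS_INDEX_MASK;
            unsigned int done;

            for (done = 0; done < ready; done++) {
                    /* free the buffer at r->clean_index ... */
                    r->clean_index = (r->clean_index + 1) % r->size;
            }
            r->c_index = hw_c_index;
            return done;
    }

    int main(void)
    {
            struct txring r = { .c_index = 0xfffe, .clean_index = 0, .size = 512 };

            return reclaim(&r, 2) == 4 ? 0 : 1; /* 0xfffe -> 2 is 4 entries */
    }
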
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 74fc9af4aadb..b8388e93520a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13913,7 +13913,7 @@ static void bnx2x_register_phc(struct bnx2x *bp)
13913 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); 13913 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13914 if (IS_ERR(bp->ptp_clock)) { 13914 if (IS_ERR(bp->ptp_clock)) {
13915 bp->ptp_clock = NULL; 13915 bp->ptp_clock = NULL;
13916 BNX2X_ERR("PTP clock registeration failed\n"); 13916 BNX2X_ERR("PTP clock registration failed\n");
13917 } 13917 }
13918} 13918}
13919 13919
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1500243b9886..c7e5e6f09647 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1439 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1439 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1440 u16 vlan_proto = tpa_info->metadata >> 1440 u16 vlan_proto = tpa_info->metadata >>
1441 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1441 RX_CMP_FLAGS2_METADATA_TPID_SFT;
1442 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; 1442 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1443 1443
1444 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1444 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1445 } 1445 }
@@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1623 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1623 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1624 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1624 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1625 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1625 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1626 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; 1626 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1627 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1627 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1628 1628
1629 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1629 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
@@ -3847,6 +3847,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3847 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3847 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3848 struct hwrm_vnic_tpa_cfg_input req = {0}; 3848 struct hwrm_vnic_tpa_cfg_input req = {0};
3849 3849
3850 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
3851 return 0;
3852
3850 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 3853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3851 3854
3852 if (tpa_flags) { 3855 if (tpa_flags) {
@@ -4558,18 +4561,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4558 return rc; 4561 return rc;
4559} 4562}
4560 4563
4561static int 4564static void
4562bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4565__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
4563 int ring_grps, int cp_rings, int vnics) 4566 int tx_rings, int rx_rings, int ring_grps,
4567 int cp_rings, int vnics)
4564{ 4568{
4565 struct hwrm_func_cfg_input req = {0};
4566 u32 enables = 0; 4569 u32 enables = 0;
4567 int rc;
4568 4570
4569 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4571 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
4570 req.fid = cpu_to_le16(0xffff); 4572 req->fid = cpu_to_le16(0xffff);
4571 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 4573 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4572 req.num_tx_rings = cpu_to_le16(tx_rings); 4574 req->num_tx_rings = cpu_to_le16(tx_rings);
4573 if (bp->flags & BNXT_FLAG_NEW_RM) { 4575 if (bp->flags & BNXT_FLAG_NEW_RM) {
4574 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 4576 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4575 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 4577 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
@@ -4578,16 +4580,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4578 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 4580 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4579 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 4581 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4580 4582
4581 req.num_rx_rings = cpu_to_le16(rx_rings); 4583 req->num_rx_rings = cpu_to_le16(rx_rings);
4582 req.num_hw_ring_grps = cpu_to_le16(ring_grps); 4584 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
4583 req.num_cmpl_rings = cpu_to_le16(cp_rings); 4585 req->num_cmpl_rings = cpu_to_le16(cp_rings);
4584 req.num_stat_ctxs = req.num_cmpl_rings; 4586 req->num_stat_ctxs = req->num_cmpl_rings;
4585 req.num_vnics = cpu_to_le16(vnics); 4587 req->num_vnics = cpu_to_le16(vnics);
4586 } 4588 }
4587 if (!enables) 4589 req->enables = cpu_to_le32(enables);
4590}
4591
4592static void
4593__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
4594 struct hwrm_func_vf_cfg_input *req, int tx_rings,
4595 int rx_rings, int ring_grps, int cp_rings,
4596 int vnics)
4597{
4598 u32 enables = 0;
4599
4600 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
4601 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4602 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4603 enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4604 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
4605 enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4606 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4607
4608 req->num_tx_rings = cpu_to_le16(tx_rings);
4609 req->num_rx_rings = cpu_to_le16(rx_rings);
4610 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
4611 req->num_cmpl_rings = cpu_to_le16(cp_rings);
4612 req->num_stat_ctxs = req->num_cmpl_rings;
4613 req->num_vnics = cpu_to_le16(vnics);
4614
4615 req->enables = cpu_to_le32(enables);
4616}
4617
4618static int
4619bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4620 int ring_grps, int cp_rings, int vnics)
4621{
4622 struct hwrm_func_cfg_input req = {0};
4623 int rc;
4624
4625 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4626 cp_rings, vnics);
4627 if (!req.enables)
4588 return 0; 4628 return 0;
4589 4629
4590 req.enables = cpu_to_le32(enables);
4591 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4630 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4592 if (rc) 4631 if (rc)
4593 return -ENOMEM; 4632 return -ENOMEM;
@@ -4604,7 +4643,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4604 int ring_grps, int cp_rings, int vnics) 4643 int ring_grps, int cp_rings, int vnics)
4605{ 4644{
4606 struct hwrm_func_vf_cfg_input req = {0}; 4645 struct hwrm_func_vf_cfg_input req = {0};
4607 u32 enables = 0;
4608 int rc; 4646 int rc;
4609 4647
4610 if (!(bp->flags & BNXT_FLAG_NEW_RM)) { 4648 if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
@@ -4612,22 +4650,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4612 return 0; 4650 return 0;
4613 } 4651 }
4614 4652
4615 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4653 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4616 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 4654 cp_rings, vnics);
4617 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4618 enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4619 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
4620 enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4621 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4622
4623 req.num_tx_rings = cpu_to_le16(tx_rings);
4624 req.num_rx_rings = cpu_to_le16(rx_rings);
4625 req.num_hw_ring_grps = cpu_to_le16(ring_grps);
4626 req.num_cmpl_rings = cpu_to_le16(cp_rings);
4627 req.num_stat_ctxs = req.num_cmpl_rings;
4628 req.num_vnics = cpu_to_le16(vnics);
4629
4630 req.enables = cpu_to_le32(enables);
4631 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4655 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4632 if (rc) 4656 if (rc)
4633 return -ENOMEM; 4657 return -ENOMEM;
@@ -4743,39 +4767,25 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
4743} 4767}
4744 4768
4745static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4769static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4746 int ring_grps, int cp_rings) 4770 int ring_grps, int cp_rings, int vnics)
4747{ 4771{
4748 struct hwrm_func_vf_cfg_input req = {0}; 4772 struct hwrm_func_vf_cfg_input req = {0};
4749 u32 flags, enables; 4773 u32 flags;
4750 int rc; 4774 int rc;
4751 4775
4752 if (!(bp->flags & BNXT_FLAG_NEW_RM)) 4776 if (!(bp->flags & BNXT_FLAG_NEW_RM))
4753 return 0; 4777 return 0;
4754 4778
4755 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4779 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4780 cp_rings, vnics);
4756 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 4781 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
4757 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 4782 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
4758 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 4783 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
4759 FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | 4784 FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
4760 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 4785 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
4761 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 4786 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
4762 enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
4763 FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
4764 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4765 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
4766 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
4767 FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
4768 4787
4769 req.flags = cpu_to_le32(flags); 4788 req.flags = cpu_to_le32(flags);
4770 req.enables = cpu_to_le32(enables);
4771 req.num_tx_rings = cpu_to_le16(tx_rings);
4772 req.num_rx_rings = cpu_to_le16(rx_rings);
4773 req.num_cmpl_rings = cpu_to_le16(cp_rings);
4774 req.num_hw_ring_grps = cpu_to_le16(ring_grps);
4775 req.num_stat_ctxs = cpu_to_le16(cp_rings);
4776 req.num_vnics = cpu_to_le16(1);
4777 if (bp->flags & BNXT_FLAG_RFS)
4778 req.num_vnics = cpu_to_le16(rx_rings + 1);
4779 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4789 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4780 if (rc) 4790 if (rc)
4781 return -ENOMEM; 4791 return -ENOMEM;
@@ -4783,38 +4793,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4783} 4793}
4784 4794
4785static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4795static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4786 int ring_grps, int cp_rings) 4796 int ring_grps, int cp_rings, int vnics)
4787{ 4797{
4788 struct hwrm_func_cfg_input req = {0}; 4798 struct hwrm_func_cfg_input req = {0};
4789 u32 flags, enables; 4799 u32 flags;
4790 int rc; 4800 int rc;
4791 4801
4792 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4802 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4793 req.fid = cpu_to_le16(0xffff); 4803 cp_rings, vnics);
4794 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 4804 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
4795 enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS; 4805 if (bp->flags & BNXT_FLAG_NEW_RM)
4796 req.num_tx_rings = cpu_to_le16(tx_rings);
4797 if (bp->flags & BNXT_FLAG_NEW_RM) {
4798 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 4806 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
4799 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 4807 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
4800 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | 4808 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
4801 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 4809 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
4802 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 4810 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
4803 enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | 4811
4804 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4805 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
4806 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
4807 FUNC_CFG_REQ_ENABLES_NUM_VNICS;
4808 req.num_rx_rings = cpu_to_le16(rx_rings);
4809 req.num_cmpl_rings = cpu_to_le16(cp_rings);
4810 req.num_hw_ring_grps = cpu_to_le16(ring_grps);
4811 req.num_stat_ctxs = cpu_to_le16(cp_rings);
4812 req.num_vnics = cpu_to_le16(1);
4813 if (bp->flags & BNXT_FLAG_RFS)
4814 req.num_vnics = cpu_to_le16(rx_rings + 1);
4815 }
4816 req.flags = cpu_to_le32(flags); 4812 req.flags = cpu_to_le32(flags);
4817 req.enables = cpu_to_le32(enables);
4818 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4813 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4819 if (rc) 4814 if (rc)
4820 return -ENOMEM; 4815 return -ENOMEM;
@@ -4822,17 +4817,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4822} 4817}
4823 4818
4824static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4819static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4825 int ring_grps, int cp_rings) 4820 int ring_grps, int cp_rings, int vnics)
4826{ 4821{
4827 if (bp->hwrm_spec_code < 0x10801) 4822 if (bp->hwrm_spec_code < 0x10801)
4828 return 0; 4823 return 0;
4829 4824
4830 if (BNXT_PF(bp)) 4825 if (BNXT_PF(bp))
4831 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 4826 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
4832 ring_grps, cp_rings); 4827 ring_grps, cp_rings, vnics);
4833 4828
4834 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 4829 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
4835 cp_rings); 4830 cp_rings, vnics);
4836} 4831}
4837 4832
4838static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, 4833static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -5865,7 +5860,6 @@ static int bnxt_init_msix(struct bnxt *bp)
5865 if (rc) 5860 if (rc)
5866 goto msix_setup_exit; 5861 goto msix_setup_exit;
5867 5862
5868 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5869 bp->cp_nr_rings = (min == 1) ? 5863 bp->cp_nr_rings = (min == 1) ?
5870 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 5864 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
5871 bp->tx_nr_rings + bp->rx_nr_rings; 5865 bp->tx_nr_rings + bp->rx_nr_rings;
@@ -5897,7 +5891,6 @@ static int bnxt_init_inta(struct bnxt *bp)
5897 bp->rx_nr_rings = 1; 5891 bp->rx_nr_rings = 1;
5898 bp->tx_nr_rings = 1; 5892 bp->tx_nr_rings = 1;
5899 bp->cp_nr_rings = 1; 5893 bp->cp_nr_rings = 1;
5900 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5901 bp->flags |= BNXT_FLAG_SHARED_RINGS; 5894 bp->flags |= BNXT_FLAG_SHARED_RINGS;
5902 bp->irq_tbl[0].vector = bp->pdev->irq; 5895 bp->irq_tbl[0].vector = bp->pdev->irq;
5903 return 0; 5896 return 0;
@@ -7531,7 +7524,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
7531 int max_rx, max_tx, tx_sets = 1; 7524 int max_rx, max_tx, tx_sets = 1;
7532 int tx_rings_needed; 7525 int tx_rings_needed;
7533 int rx_rings = rx; 7526 int rx_rings = rx;
7534 int cp, rc; 7527 int cp, vnics, rc;
7535 7528
7536 if (tcs) 7529 if (tcs)
7537 tx_sets = tcs; 7530 tx_sets = tcs;
@@ -7547,10 +7540,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
7547 if (max_tx < tx_rings_needed) 7540 if (max_tx < tx_rings_needed)
7548 return -ENOMEM; 7541 return -ENOMEM;
7549 7542
7543 vnics = 1;
7544 if (bp->flags & BNXT_FLAG_RFS)
7545 vnics += rx_rings;
7546
7550 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7547 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7551 rx_rings <<= 1; 7548 rx_rings <<= 1;
7552 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; 7549 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
7553 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp); 7550 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
7551 vnics);
7554} 7552}
7555 7553
7556static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 7554static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -8437,13 +8435,20 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
8437 return 0; 8435 return 0;
8438 8436
8439 bnxt_hwrm_func_qcaps(bp); 8437 bnxt_hwrm_func_qcaps(bp);
8440 __bnxt_close_nic(bp, true, false); 8438
8439 if (netif_running(bp->dev))
8440 __bnxt_close_nic(bp, true, false);
8441
8441 bnxt_clear_int_mode(bp); 8442 bnxt_clear_int_mode(bp);
8442 rc = bnxt_init_int_mode(bp); 8443 rc = bnxt_init_int_mode(bp);
8443 if (rc) 8444
8444 dev_close(bp->dev); 8445 if (netif_running(bp->dev)) {
8445 else 8446 if (rc)
8446 rc = bnxt_open_nic(bp, true, false); 8447 dev_close(bp->dev);
8448 else
8449 rc = bnxt_open_nic(bp, true, false);
8450 }
8451
8447 return rc; 8452 return rc;
8448} 8453}
8449 8454
@@ -8664,6 +8669,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8664 if (rc) 8669 if (rc)
8665 goto init_err_pci_clean; 8670 goto init_err_pci_clean;
8666 8671
8672 /* No TC has been set yet and rings may have been trimmed due to
8673 * limited MSIX, so we re-initialize the TX rings per TC.
8674 */
8675 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8676
8667 bnxt_get_wol_settings(bp); 8677 bnxt_get_wol_settings(bp);
8668 if (bp->flags & BNXT_FLAG_WOL_CAP) 8678 if (bp->flags & BNXT_FLAG_WOL_CAP)
8669 device_set_wakeup_enable(&pdev->dev, bp->wol); 8679 device_set_wakeup_enable(&pdev->dev, bp->wol);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 1989c470172c..5e3d62189cab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -189,6 +189,7 @@ struct rx_cmp_ext {
189 #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) 189 #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
190 #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) 190 #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4)
191 __le32 rx_cmp_meta_data; 191 __le32 rx_cmp_meta_data;
192 #define RX_CMP_FLAGS2_METADATA_TCI_MASK 0xffff
192 #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff 193 #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff
193 #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000 194 #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000
194 #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16 195 #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
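
The bnxt mask change matters because the RX completion metadata word
carries the full 16-bit VLAN TCI; masking with the 12-bit VID would strip
the PCP/DEI bits before the tag is handed to __vlan_hwaccel_put_tag().
Standalone decode sketch (the meta value is invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t meta = 0x8100A064;    /* hypothetical: TPID << 16 | TCI */
            uint16_t tci  = meta & 0xffff; /* keep PCP/DEI/VID intact */
            uint16_t vid  = tci & 0x0fff;
            uint8_t  pcp  = tci >> 13;

            printf("tci=0x%04x vid=%u pcp=%u\n", tci, vid, pcp);
            return 0;
    }
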
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index fbe6e208e17b..65c2cee35766 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
 	if (rc)
 		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
 			    __func__, flow_handle, rc);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
 
@@ -484,13 +487,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 	req.action_flags = cpu_to_le16(action_flags);
 
 	mutex_lock(&bp->hwrm_cmd_lock);
-
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc)
 		*flow_handle = resp->flow_handle;
-
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
+	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
+		rc = -ENOSPC;
+	else if (rc)
+		rc = -EIO;
 	return rc;
 }
 
@@ -561,6 +566,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
 
@@ -576,6 +583,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
 
@@ -624,6 +634,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
 
@@ -639,6 +651,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
 
@@ -1269,11 +1284,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
 	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
 					   &tc_flow_cmd->cookie,
 					   tc_info->flow_ht_params);
-	if (!flow_node) {
-		netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
-			    tc_flow_cmd->cookie);
+	if (!flow_node)
 		return -EINVAL;
-	}
 
 	return __bnxt_tc_del_flow(bp, flow_node);
 }
@@ -1290,11 +1302,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
 	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
 					   &tc_flow_cmd->cookie,
 					   tc_info->flow_ht_params);
-	if (!flow_node) {
-		netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
-			    tc_flow_cmd->cookie);
+	if (!flow_node)
 		return -1;
-	}
 
 	flow = &flow_node->flow;
 	curr_stats = &flow->stats;
@@ -1344,8 +1353,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
 	} else {
 		netdev_info(bp->dev, "error rc=%d", rc);
 	}
-
 	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	if (rc)
+		rc = -EIO;
 	return rc;
 }
 
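The bnxt_tc.c hunks above all share one pattern: raw HWRM firmware status codes are no longer returned to callers, which expect negative errno values, with a resource-allocation failure mapped to the distinct -ENOSPC. A minimal userspace sketch of that mapping follows; the numeric firmware code is a stand-in, not the real HWRM constant:

	#include <errno.h>

	#define FW_ERR_RESOURCE_ALLOC 5	/* stand-in for HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR */

	static int fw_rc_to_errno(int fw_rc)
	{
		if (fw_rc == 0)
			return 0;		/* success passes through */
		if (fw_rc == FW_ERR_RESOURCE_ALLOC)
			return -ENOSPC;		/* flow table full: distinct, actionable error */
		return -EIO;			/* any other firmware error: generic I/O failure */
	}
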
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c1841db1b500..f2593978ae75 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
 
 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 
-		usleep_range(10, 20);
+		udelay(10);
 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
 	}
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 99c9b88d6d34..e880be8e3c45 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5234,7 +5234,6 @@ static void cxgb4_mgmt_setup(struct net_device *dev)
 	/* Initialize the device structure. */
 	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
 	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
-	dev->needs_free_netdev = true;
 }
 
 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
@@ -5445,6 +5444,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->name = pci_name(pdev);
 	adapter->mbox = func;
 	adapter->pf = func;
+	adapter->params.chip = chip;
+	adapter->adap_idx = adap_idx;
 	adapter->msg_enable = DFLT_MSG_ENABLE;
 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
 				    (sizeof(struct mbox_cmd) *
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5eb999af2c40..bd3f6e4d1341 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -540,6 +540,7 @@ static int gmac_setup_txqs(struct net_device *netdev)
 
 	if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
 		dev_warn(geth->dev, "TX queue base it not aligned\n");
+		kfree(skb_tab);
 		return -ENOMEM;
 	}
 
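The gemini.c fix above is the classic error-path cleanup rule: once skb_tab has been allocated, every early return must release it, or the bail-out leaks memory. A self-contained sketch of the pattern; the names are illustrative, not the driver's:

	#include <stdlib.h>

	static int setup_queue(int base_is_aligned)
	{
		void *skb_tab = calloc(256, sizeof(void *));

		if (!skb_tab)
			return -1;

		if (!base_is_aligned) {	/* stand-in for the DMA-base alignment check */
			free(skb_tab);	/* the free the patch adds */
			return -1;
		}

		/* ... on success the table is kept and freed at teardown ... */
		free(skb_tab);
		return 0;
	}
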
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 159dc2df878d..fd43f98ddbe7 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2021,7 +2021,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
 	}
 
 	if (unlikely(err < 0)) {
-		percpu_stats->tx_errors++;
 		percpu_stats->tx_fifo_errors++;
 		return err;
 	}
@@ -2289,7 +2288,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 	vaddr = phys_to_virt(addr);
 	prefetch(vaddr + qm_fd_get_offset(fd));
 
-	fd_format = qm_fd_get_format(fd);
 	/* The only FD types that we may receive are contig and S/G */
 	WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
 
@@ -2322,8 +2320,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 
 	skb_len = skb->len;
 
-	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
+	if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
+		percpu_stats->rx_dropped++;
 		return qman_cb_dqrr_consume;
+	}
 
 	percpu_stats->rx_packets++;
 	percpu_stats->rx_bytes += skb_len;
@@ -2871,7 +2871,7 @@ static int dpaa_remove(struct platform_device *pdev)
 	struct device *dev;
 	int err;
 
-	dev = &pdev->dev;
+	dev = pdev->dev.parent;
 	net_dev = dev_get_drvdata(dev);
 
 	priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 7a7f3a42b2aa..d4604bc8eb5b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3600,6 +3600,8 @@ fec_drv_remove(struct platform_device *pdev)
 	fec_enet_mii_remove(fep);
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 	if (of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 	of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 9a581faaa742..57b1e2b47c0a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
 	set_bucket(dtsec->regs, bucket, true);
 
 	/* Create element to be added to the driver hash table */
-	hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
 	if (!hash_entry)
 		return -ENOMEM;
 	hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 86944bc3b273..74bd260ca02a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
 
 static int hns_gmac_get_sset_count(int stringset)
 {
-	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+	if (stringset == ETH_SS_STATS)
 		return ARRAY_SIZE(g_gmac_stats_string);
 
 	return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index b62816c1574e..93e71e27401b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
 
 int hns_ppe_get_sset_count(int stringset)
 {
-	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+	if (stringset == ETH_SS_STATS)
 		return ETH_PPE_STATIC_NUM;
 	return 0;
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6f3570cfb501..e2e28532e4dc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
  */
 int hns_rcb_get_ring_sset_count(int stringset)
 {
-	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+	if (stringset == ETH_SS_STATS)
 		return HNS_RING_STATIC_REG_NUM;
 
 	return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7ea7f8a4aa2a..2e14a3ae1d8b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
 			cnt--;
 
 		return cnt;
-	} else {
+	} else if (stringset == ETH_SS_STATS) {
 		return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+	} else {
+		return -EOPNOTSUPP;
 	}
 }
 
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index afb7ebe20b24..824fd44e25f0 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -400,6 +400,10 @@
 #define E1000_ICR_RXDMT0	0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXO		0x00000040 /* Receiver Overrun */
 #define E1000_ICR_RXT0		0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_MDAC		0x00000200 /* MDIO Access Complete */
+#define E1000_ICR_SRPD		0x00010000 /* Small Receive Packet Detected */
+#define E1000_ICR_ACK		0x00020000 /* Receive ACK Frame Detected */
+#define E1000_ICR_MNG		0x00040000 /* Manageability Event Detected */
 #define E1000_ICR_ECCER		0x00400000 /* Uncorrectable ECC Error */
 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_INT_ASSERTED	0x80000000
@@ -407,7 +411,7 @@
 #define E1000_ICR_RXQ1		0x00200000 /* Rx Queue 1 Interrupt */
 #define E1000_ICR_TXQ0		0x00400000 /* Tx Queue 0 Interrupt */
 #define E1000_ICR_TXQ1		0x00800000 /* Tx Queue 1 Interrupt */
-#define E1000_ICR_OTHER		0x01000000 /* Other Interrupts */
+#define E1000_ICR_OTHER		0x01000000 /* Other Interrupt */
 
 /* PBA ECC Register */
 #define E1000_PBA_ECC_COUNTER_MASK	0xFFF00000 /* ECC counter mask */
@@ -431,12 +435,27 @@
 	E1000_IMS_RXSEQ  |    \
 	E1000_IMS_LSC)
 
+/* These are all of the events related to the OTHER interrupt.
+ */
+#define IMS_OTHER_MASK    ( \
+	E1000_IMS_LSC  |    \
+	E1000_IMS_RXO  |    \
+	E1000_IMS_MDAC |    \
+	E1000_IMS_SRPD |    \
+	E1000_IMS_ACK  |    \
+	E1000_IMS_MNG)
+
 /* Interrupt Mask Set */
 #define E1000_IMS_TXDW		E1000_ICR_TXDW	    /* Transmit desc written back */
 #define E1000_IMS_LSC		E1000_ICR_LSC	    /* Link Status Change */
 #define E1000_IMS_RXSEQ		E1000_ICR_RXSEQ	    /* Rx sequence error */
 #define E1000_IMS_RXDMT0	E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_IMS_RXO		E1000_ICR_RXO	    /* Receiver Overrun */
 #define E1000_IMS_RXT0		E1000_ICR_RXT0	    /* Rx timer intr */
+#define E1000_IMS_MDAC		E1000_ICR_MDAC	    /* MDIO Access Complete */
+#define E1000_IMS_SRPD		E1000_ICR_SRPD	    /* Small Receive Packet */
+#define E1000_IMS_ACK		E1000_ICR_ACK	    /* Receive ACK Frame Detected */
+#define E1000_IMS_MNG		E1000_ICR_MNG	    /* Manageability Event */
 #define E1000_IMS_ECCER		E1000_ICR_ECCER	    /* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0		E1000_ICR_RXQ0	    /* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1		E1000_ICR_RXQ1	    /* Rx Queue 1 Interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 31277d3bb7dc..1dddfb7b2de6 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,9 +1367,6 @@ out:
  * Checks to see of the link status of the hardware has changed. If a
  * change in link status has been detected, then we read the PHY registers
  * to get the current speed/duplex if link exists.
- *
- * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- * up).
  **/
 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 {
@@ -1385,7 +1382,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	 * Change or Rx Sequence Error interrupt.
 	 */
 	if (!mac->get_link_status)
-		return 1;
+		return 0;
+	mac->get_link_status = false;
 
 	/* First we want to see if the MII Status Register reports
 	 * link. If so, then we want to get the current speed/duplex
@@ -1393,12 +1391,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	 */
 	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
 	if (ret_val)
-		return ret_val;
+		goto out;
 
 	if (hw->mac.type == e1000_pchlan) {
 		ret_val = e1000_k1_gig_workaround_hv(hw, link);
 		if (ret_val)
-			return ret_val;
+			goto out;
 	}
 
 	/* When connected at 10Mbps half-duplex, some parts are excessively
@@ -1431,7 +1429,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 
 		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
-			return ret_val;
+			goto out;
 
 		if (hw->mac.type == e1000_pch2lan)
 			emi_addr = I82579_RX_CONFIG;
@@ -1453,7 +1451,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 		hw->phy.ops.release(hw);
 
 		if (ret_val)
-			return ret_val;
+			goto out;
 
 		if (hw->mac.type >= e1000_pch_spt) {
 			u16 data;
@@ -1462,14 +1460,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 			if (speed == SPEED_1000) {
 				ret_val = hw->phy.ops.acquire(hw);
 				if (ret_val)
-					return ret_val;
+					goto out;
 
 				ret_val = e1e_rphy_locked(hw,
 							  PHY_REG(776, 20),
 							  &data);
 				if (ret_val) {
 					hw->phy.ops.release(hw);
-					return ret_val;
+					goto out;
 				}
 
 				ptr_gap = (data & (0x3FF << 2)) >> 2;
@@ -1483,18 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 				}
 				hw->phy.ops.release(hw);
 				if (ret_val)
-					return ret_val;
+					goto out;
 			} else {
 				ret_val = hw->phy.ops.acquire(hw);
 				if (ret_val)
-					return ret_val;
+					goto out;
 
 				ret_val = e1e_wphy_locked(hw,
 							  PHY_REG(776, 20),
 							  0xC023);
 				hw->phy.ops.release(hw);
 				if (ret_val)
-					return ret_val;
+					goto out;
 
 			}
 		}
@@ -1521,7 +1519,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
 		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
 		if (ret_val)
-			return ret_val;
+			goto out;
 	}
 	if (hw->mac.type >= e1000_pch_lpt) {
 		/* Set platform power management values for
@@ -1529,7 +1527,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 		 */
 		ret_val = e1000_platform_pm_pch_lpt(hw, link);
 		if (ret_val)
-			return ret_val;
+			goto out;
 	}
 
 	/* Clear link partner's EEE ability */
@@ -1552,9 +1550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	}
 
 	if (!link)
-		return 0; /* No link detected */
-
-	mac->get_link_status = false;
+		goto out;
 
 	switch (hw->mac.type) {
 	case e1000_pch2lan:
@@ -1616,12 +1612,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	 * different link partner.
 	 */
 	ret_val = e1000e_config_fc_after_link_up(hw);
-	if (ret_val) {
+	if (ret_val)
 		e_dbg("Error configuring flow control\n");
-		return ret_val;
-	}
 
-	return 1;
+	return ret_val;
+
+out:
+	mac->get_link_status = true;
+	return ret_val;
 }
 
 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index f457c5703d0c..5bdc3a2d4fd7 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,9 +410,6 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
  * Checks to see of the link status of the hardware has changed. If a
  * change in link status has been detected, then we read the PHY registers
  * to get the current speed/duplex if link exists.
- *
- * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
- * up).
  **/
 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 {
@@ -426,20 +423,16 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 	 * Change or Rx Sequence Error interrupt.
 	 */
 	if (!mac->get_link_status)
-		return 1;
+		return 0;
+	mac->get_link_status = false;
 
 	/* First we want to see if the MII Status Register reports
 	 * link. If so, then we want to get the current speed/duplex
 	 * of the PHY.
 	 */
 	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
-	if (ret_val)
-		return ret_val;
-
-	if (!link)
-		return 0; /* No link detected */
-
-	mac->get_link_status = false;
+	if (ret_val || !link)
+		goto out;
 
 	/* Check if there was DownShift, must be checked
 	 * immediately after link-up
@@ -464,12 +457,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
 	 * different link partner.
 	 */
 	ret_val = e1000e_config_fc_after_link_up(hw);
-	if (ret_val) {
+	if (ret_val)
 		e_dbg("Error configuring flow control\n");
-		return ret_val;
-	}
 
-	return 1;
+	return ret_val;
+
+out:
+	mac->get_link_status = true;
+	return ret_val;
 }
 
 /**
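Both e1000e link-check rewrites above converge every failure path on a single 'out' label that re-arms get_link_status, so the next watchdog poll retries detection instead of losing the event; e1000e_has_link() in netdev.c then reads that flag rather than a tri-state return value. A compilable sketch of the control flow, with helpers as stand-ins for the PHY accessors:

	#include <stdbool.h>

	static bool get_link_status = true;

	static int read_phy_link(bool *link)	/* stand-in for e1000e_phy_has_link_generic() */
	{
		*link = false;
		return 0;
	}

	static int check_for_link(void)
	{
		bool link;
		int ret;

		if (!get_link_status)
			return 0;		/* no pending link event */
		get_link_status = false;	/* consume the event up front */

		ret = read_phy_link(&link);
		if (ret || !link)
			goto out;		/* error or no link: retry on next poll */

		/* ... speed/duplex and flow-control setup would follow ... */
		return 0;

	out:
		get_link_status = true;		/* re-arm so the next poll tries again */
		return ret;
	}
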
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 1298b69f990b..dc853b0863af 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1914,30 +1914,20 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 icr;
-	bool enable = true;
-
-	icr = er32(ICR);
-	if (icr & E1000_ICR_RXO) {
-		ew32(ICR, E1000_ICR_RXO);
-		enable = false;
-		/* napi poll will re-enable Other, make sure it runs */
-		if (napi_schedule_prep(&adapter->napi)) {
-			adapter->total_rx_bytes = 0;
-			adapter->total_rx_packets = 0;
-			__napi_schedule(&adapter->napi);
-		}
-	}
+	u32 icr = er32(ICR);
+
+	if (icr & adapter->eiac_mask)
+		ew32(ICS, (icr & adapter->eiac_mask));
+
 	if (icr & E1000_ICR_LSC) {
-		ew32(ICR, E1000_ICR_LSC);
 		hw->mac.get_link_status = true;
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->state))
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (enable && !test_bit(__E1000_DOWN, &adapter->state))
-		ew32(IMS, E1000_IMS_OTHER);
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);
 
 	return IRQ_HANDLED;
 }
@@ -2040,7 +2030,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
 		       hw->hw_addr + E1000_EITR_82574(vector));
 	else
 		writel(1, hw->hw_addr + E1000_EITR_82574(vector));
-	adapter->eiac_mask |= E1000_IMS_OTHER;
 
 	/* Cause Tx interrupts on every write back */
 	ivar |= BIT(31);
@@ -2265,7 +2254,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 
 	if (adapter->msix_entries) {
 		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
-		ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC);
+		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
+		     IMS_OTHER_MASK);
 	} else if (hw->mac.type >= e1000_pch_lpt) {
 		ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
 	} else {
@@ -2333,8 +2323,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
 {
 	struct pci_dev *pdev = adapter->pdev;
 
-	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
-					GFP_KERNEL);
+	ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
+					 GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
@@ -2707,8 +2697,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
 		napi_complete_done(napi, work_done);
 		if (!test_bit(__E1000_DOWN, &adapter->state)) {
 			if (adapter->msix_entries)
-				ew32(IMS, adapter->rx_ring->ims_val |
-				     E1000_IMS_OTHER);
+				ew32(IMS, adapter->rx_ring->ims_val);
 			else
 				e1000_irq_enable(adapter);
 		}
@@ -5101,7 +5090,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
 	case e1000_media_type_copper:
 		if (hw->mac.get_link_status) {
 			ret_val = hw->mac.ops.check_for_link(hw);
-			link_active = ret_val > 0;
+			link_active = !hw->mac.get_link_status;
 		} else {
 			link_active = true;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 21d29f7936f6..d39b0b7011b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 		trigger_cmd_completions(dev);
 	}
 
-	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
 	mlx5_core_err(dev, "end\n");
 
 unlock:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index ba338428ffd1..3c0d882ba183 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -443,6 +443,17 @@ int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
 }
 EXPORT_SYMBOL(mlxsw_afa_block_jump);
 
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
+{
+	if (block->finished)
+		return -EINVAL;
+	mlxsw_afa_set_goto_set(block->cur_set,
+			       MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+	block->finished = true;
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_terminate);
+
 static struct mlxsw_afa_fwd_entry *
 mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
 {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 6dd601703c99..3a155d104384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -67,6 +67,7 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
 u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
 int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
 int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 92194a9b2caf..21bee8f19894 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -535,6 +535,7 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
 int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
 				u16 group_id);
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
 int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 1c1601a43978..79b1fa27a9a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -518,6 +518,11 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
 	return mlxsw_afa_block_jump(rulei->act_block, group_id);
 }
 
+int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
+{
+	return mlxsw_afa_block_terminate(rulei->act_block);
+}
+
 int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
 {
 	return mlxsw_afa_block_append_drop(rulei->act_block);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 93728c694e6d..0a9adc5962fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
 
 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
 	MLXSW_SP_CPU_PORT_SB_CM,
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
 	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_CPU_PORT_SB_CM,
-	MLXSW_SP_SB_CM(10000, 0, 0),
+	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
 	MLXSW_SP_CPU_PORT_SB_CM,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 6ce00e28d4ea..89dbf569dff5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -65,7 +65,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 	tcf_exts_to_list(exts, &actions);
 	list_for_each_entry(a, &actions, list) {
 		if (is_tcf_gact_ok(a)) {
-			err = mlxsw_sp_acl_rulei_act_continue(rulei);
+			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
 			if (err)
 				return err;
 		} else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 948aceb512c5..4b87ec20e658 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -51,6 +51,9 @@ struct mlxsw_sp_span_inspected_port {
 	struct list_head list;
 	enum mlxsw_sp_span_type type;
 	u8 local_port;
+
+	/* Whether this is a directly bound mirror (port-to-port) or an ACL. */
+	bool bound;
 };
 
 struct mlxsw_sp_span_parms {
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index a10ef50e4f12..017fb2322589 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -1,16 +1,16 @@
 #
-# National Semi-conductor device configuration
+# National Semiconductor device configuration
 #
 
 config NET_VENDOR_NATSEMI
-	bool "National Semi-conductor devices"
+	bool "National Semiconductor devices"
 	default y
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
 	  Note that the answer to this question doesn't directly affect the
 	  kernel: saying N will just cause the configurator to skip all
-	  the questions about National Semi-conductor devices. If you say Y,
+	  the questions about National Semiconductor devices. If you say Y,
 	  you will be asked for your specific card in the following questions.
 
 if NET_VENDOR_NATSEMI
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile
index cc664977596e..a759aa09ef59 100644
--- a/drivers/net/ethernet/natsemi/Makefile
+++ b/drivers/net/ethernet/natsemi/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 #
-# Makefile for the National Semi-conductor Sonic devices.
+# Makefile for the National Semiconductor Sonic devices.
 #
 
 obj-$(CONFIG_MACSONIC) += macsonic.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 6f546e869d8d..00f41c145d4d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2480,7 +2480,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
 	if (rc)
 		return rc;
 
-	/* Free Task CXT */
+	/* Free Task CXT ( Intentionally RoCE as task-id is shared between
+	 * RoCE and iWARP )
+	 */
+	proto = PROTOCOLID_ROCE;
 	rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
 				    qed_cxt_get_proto_tid_count(p_hwfn, proto));
 	if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 03ad4eeac7f8..69051e98aff9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1703,6 +1703,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
 
 	if (eth_type == ETH_P_IP) {
+		if (iph->protocol != IPPROTO_TCP) {
+			DP_NOTICE(p_hwfn,
+				  "Unexpected ip protocol on ll2 %x\n",
+				  iph->protocol);
+			return -EINVAL;
+		}
+
 		cm_info->local_ip[0] = ntohl(iph->daddr);
 		cm_info->remote_ip[0] = ntohl(iph->saddr);
 		cm_info->ip_version = TCP_IPV4;
@@ -1711,6 +1718,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 		*payload_len = ntohs(iph->tot_len) - ip_hlen;
 	} else if (eth_type == ETH_P_IPV6) {
 		ip6h = (struct ipv6hdr *)iph;
+
+		if (ip6h->nexthdr != IPPROTO_TCP) {
+			DP_NOTICE(p_hwfn,
+				  "Unexpected ip protocol on ll2 %x\n",
+				  iph->protocol);
+			return -EINVAL;
+		}
+
 		for (i = 0; i < 4; i++) {
 			cm_info->local_ip[i] =
 			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
@@ -1928,8 +1943,8 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
 		/* Missing lower byte is now available */
 		mpa_len = fpdu->fpdu_length | *mpa_data;
 		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
-		fpdu->mpa_frag_len = fpdu->fpdu_length;
 		/* one byte of hdr */
+		fpdu->mpa_frag_len = 1;
 		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
 		DP_VERBOSE(p_hwfn,
 			   QED_MSG_RDMA,
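The qed_iwarp.c parser above now rejects non-TCP payloads before any TCP fields are read. A generic sketch of that guard; the struct layout is illustrative, not the kernel's iphdr/ipv6hdr:

	#include <errno.h>
	#include <stdint.h>

	#define PROTO_TCP 6

	struct ip_hdr_min {
		uint8_t protocol;	/* stand-in for iphdr->protocol / ipv6hdr->nexthdr */
	};

	static int parse_l4(const struct ip_hdr_min *iph)
	{
		if (iph->protocol != PROTO_TCP)
			return -EINVAL;	/* refuse to parse further, as the patch does */
		/* ... only now is it safe to read TCP header fields ... */
		return 0;
	}
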
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 5d040b873137..a411f9c702a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -379,6 +379,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn)
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
 
 	qed_rdma_free_reserved_lkey(p_hwfn);
+	qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
 	qed_rdma_resc_free(p_hwfn);
 }
 
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 2db70eabddfe..a01e7d6e5442 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -288,7 +288,7 @@ int __init qede_init(void)
 	}
 
 	/* Must register notifier before pci ops, since we might miss
-	 * interface rename after pci probe and netdev registeration.
+	 * interface rename after pci probe and netdev registration.
 	 */
 	ret = register_netdevice_notifier(&qede_netdev_notifier);
 	if (ret) {
@@ -988,7 +988,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 	if (rc)
 		goto err3;
 
-	/* Prepare the lock prior to the registeration of the netdev,
+	/* Prepare the lock prior to the registration of the netdev,
 	 * as once it's registered we might reach flows requiring it
 	 * [it's even possible to reach a flow needing it directly
 	 * from there, although it's unlikely].
@@ -2067,8 +2067,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
 	link_params.link_up = true;
 	edev->ops->common->set_link(edev->cdev, &link_params);
 
-	qede_rdma_dev_event_open(edev);
-
 	edev->state = QEDE_STATE_OPEN;
 
 	DP_INFO(edev, "Ending successfully qede load\n");
@@ -2169,12 +2167,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
 			DP_NOTICE(edev, "Link is up\n");
 			netif_tx_start_all_queues(edev->ndev);
 			netif_carrier_on(edev->ndev);
+			qede_rdma_dev_event_open(edev);
 		}
 	} else {
 		if (netif_carrier_ok(edev->ndev)) {
 			DP_NOTICE(edev, "Link is down\n");
 			netif_tx_disable(edev->ndev);
 			netif_carrier_off(edev->ndev);
+			qede_rdma_dev_event_close(edev);
 		}
 	}
 }
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 9b2280badaf7..02adb513f475 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -485,7 +485,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
 	if (IS_ERR(ptp->clock)) {
 		rc = -EINVAL;
-		DP_ERR(edev, "PTP clock registeration failed\n");
+		DP_ERR(edev, "PTP clock registration failed\n");
 		goto err2;
 	}
 
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 9cbb27263742..d5a32b7c7dc5 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1194,9 +1194,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
 	while (tx_q->tpd.consume_idx != hw_consume_idx) {
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
 		if (tpbuf->dma_addr) {
-			dma_unmap_single(adpt->netdev->dev.parent,
-					 tpbuf->dma_addr, tpbuf->length,
-					 DMA_TO_DEVICE);
+			dma_unmap_page(adpt->netdev->dev.parent,
+				       tpbuf->dma_addr, tpbuf->length,
+				       DMA_TO_DEVICE);
 			tpbuf->dma_addr = 0;
 		}
 
@@ -1353,9 +1353,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
 
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
 		tpbuf->length = mapped_len;
-		tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
-						 skb->data, tpbuf->length,
-						 DMA_TO_DEVICE);
+		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+					       virt_to_page(skb->data),
+					       offset_in_page(skb->data),
+					       tpbuf->length,
+					       DMA_TO_DEVICE);
 		ret = dma_mapping_error(adpt->netdev->dev.parent,
 					tpbuf->dma_addr);
 		if (ret)
@@ -1371,9 +1373,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
 	if (mapped_len < len) {
 		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
 		tpbuf->length = len - mapped_len;
-		tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent,
-						 skb->data + mapped_len,
-						 tpbuf->length, DMA_TO_DEVICE);
+		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
+					       virt_to_page(skb->data +
+							    mapped_len),
+					       offset_in_page(skb->data +
+							      mapped_len),
+					       tpbuf->length, DMA_TO_DEVICE);
 		ret = dma_mapping_error(adpt->netdev->dev.parent,
 					tpbuf->dma_addr);
 		if (ret)
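The emac-mac.c hunks above switch the TX path from dma_map_single() to dma_map_page(), which takes an explicit page plus in-page offset rather than a kernel virtual address. The decomposition itself is plain arithmetic, as this small runnable analogue of virt_to_page()/offset_in_page() shows; PAGE_SIZE and the address are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		uintptr_t addr = 0x12345abcUL;			/* pretend skb->data */
		uintptr_t base = addr & ~(PAGE_SIZE - 1);	/* virt_to_page() analogue */
		uintptr_t off  = addr & (PAGE_SIZE - 1);	/* offset_in_page() analogue */

		printf("page base 0x%lx + offset 0x%lx\n",
		       (unsigned long)base, (unsigned long)off);
		return 0;
	}
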
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 012fb66eed8d..f0afb88d7bc2 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
 	pdata = netdev_priv(dev);
 	BUG_ON(!pdata);
 	BUG_ON(!pdata->ioaddr);
-	WARN_ON(dev->phydev);
 
 	SMSC_TRACE(pdata, ifdown, "Stopping driver");
 
+	unregister_netdev(dev);
+
 	mdiobus_unregister(pdata->mii_bus);
 	mdiobus_free(pdata->mii_bus);
 
-	unregister_netdev(dev);
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					   "smsc911x-memory");
 	if (!res)
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 111e7ca9df56..f5c5984afefb 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1295,7 +1295,7 @@ static int ave_open(struct net_device *ndev)
 	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
 	writel(val, priv->base + AVE_IIRQC);
 
-	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
+	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
 	ave_irq_restore(ndev, val);
 
 	napi_enable(&priv->napi_rx);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 63d3d6b215f3..a94f50442613 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
 	dev->ethtool_ops = &vnet_ethtool_ops;
 	dev->watchdog_timeo = VNET_TX_TIMEOUT;
 
-	dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+	dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
 			   NETIF_F_HW_CSUM | NETIF_F_SG;
 	dev->features = dev->hw_features;
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 8af8891078e2..1b4af54a4968 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1075,7 +1075,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
 		/* set speed_in input in case RMII mode is used in 100Mbps */
 		if (phy->speed == 100)
 			mac_control |= BIT(15);
-		else if (phy->speed == 10)
+		/* in band mode only works in 10Mbps RGMII mode */
+		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
 			mac_control |= BIT(18); /* In Band mode */
 
 		if (priv->rx_pause)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 0db3bd1ea06f..32861036c3fc 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -173,6 +173,7 @@ struct rndis_device {
 	struct list_head req_list;
 
 	struct work_struct mcast_work;
+	u32 filter;
 
 	bool link_state;	/* 0 - link up, 1 - link down */
 
@@ -211,7 +212,6 @@ void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
 void rndis_set_subchannel(struct work_struct *w);
-bool rndis_filter_opened(const struct netvsc_device *nvdev);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index aa95e81af6e5..4123d081b1c7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -93,6 +93,11 @@ static void free_netvsc_device(struct rcu_head *head)
 		= container_of(head, struct netvsc_device, rcu);
 	int i;
 
+	kfree(nvdev->extension);
+	vfree(nvdev->recv_buf);
+	vfree(nvdev->send_buf);
+	kfree(nvdev->send_section_map);
+
 	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
 		vfree(nvdev->chan_table[i].mrc.slots);
 
@@ -218,12 +223,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
 		net_device->recv_buf_gpadl_handle = 0;
 	}
 
-	if (net_device->recv_buf) {
-		/* Free up the receive buffer */
-		vfree(net_device->recv_buf);
-		net_device->recv_buf = NULL;
-	}
-
 	if (net_device->send_buf_gpadl_handle) {
 		ret = vmbus_teardown_gpadl(device->channel,
 					   net_device->send_buf_gpadl_handle);
@@ -238,12 +237,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
 		}
 		net_device->send_buf_gpadl_handle = 0;
 	}
-	if (net_device->send_buf) {
-		/* Free up the send buffer */
-		vfree(net_device->send_buf);
-		net_device->send_buf = NULL;
-	}
-	kfree(net_device->send_section_map);
 }
 
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
@@ -580,26 +573,29 @@ void netvsc_device_remove(struct hv_device *device)
 		= rtnl_dereference(net_device_ctx->nvdev);
 	int i;
 
-	cancel_work_sync(&net_device->subchan_work);
-
 	netvsc_revoke_buf(device, net_device);
 
 	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 
+	/* And disassociate NAPI context from device */
+	for (i = 0; i < net_device->num_chn; i++)
+		netif_napi_del(&net_device->chan_table[i].napi);
+
 	/*
 	 * At this point, no one should be accessing net_device
 	 * except in here
 	 */
 	netdev_dbg(ndev, "net device safe to remove\n");
 
+	/* older versions require that buffer be revoked before close */
+	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
+		netvsc_teardown_gpadl(device, net_device);
+
 	/* Now, we can close the channel safely */
 	vmbus_close(device->channel);
 
-	netvsc_teardown_gpadl(device, net_device);
-
-	/* And dissassociate NAPI context from device */
-	for (i = 0; i < net_device->num_chn; i++)
-		netif_napi_del(&net_device->chan_table[i].napi);
+	if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
+		netvsc_teardown_gpadl(device, net_device);
 
 	/* Release all resources */
 	free_netvsc_device_rcu(net_device);
@@ -663,14 +659,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 	queue_sends =
 		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 
-	if (net_device->destroy && queue_sends == 0)
-		wake_up(&net_device->wait_drain);
+	if (unlikely(net_device->destroy)) {
+		if (queue_sends == 0)
+			wake_up(&net_device->wait_drain);
+	} else {
+		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 
-	if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
-	    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
-	     queue_sends < 1)) {
-		netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
-		ndev_ctx->eth_stats.wake_queue++;
+		if (netif_tx_queue_stopped(txq) &&
+		    (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
+		     queue_sends < 1)) {
+			netif_tx_wake_queue(txq);
+			ndev_ctx->eth_stats.wake_queue++;
+		}
 	}
 }
 
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cdb78eefab67..f28c85d212ce 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,7 +46,10 @@
46 46
47#include "hyperv_net.h" 47#include "hyperv_net.h"
48 48
49#define RING_SIZE_MIN 64 49#define RING_SIZE_MIN 64
50#define RETRY_US_LO 5000
51#define RETRY_US_HI 10000
52#define RETRY_MAX 2000 /* >10 sec */
50 53
51#define LINKCHANGE_INT (2 * HZ) 54#define LINKCHANGE_INT (2 * HZ)
52#define VF_TAKEOVER_INT (HZ / 10) 55#define VF_TAKEOVER_INT (HZ / 10)
@@ -89,15 +92,20 @@ static void netvsc_change_rx_flags(struct net_device *net, int change)
89static void netvsc_set_rx_mode(struct net_device *net) 92static void netvsc_set_rx_mode(struct net_device *net)
90{ 93{
91 struct net_device_context *ndev_ctx = netdev_priv(net); 94 struct net_device_context *ndev_ctx = netdev_priv(net);
92 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 95 struct net_device *vf_netdev;
93 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); 96 struct netvsc_device *nvdev;
94 97
98 rcu_read_lock();
99 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
95 if (vf_netdev) { 100 if (vf_netdev) {
96 dev_uc_sync(vf_netdev, net); 101 dev_uc_sync(vf_netdev, net);
97 dev_mc_sync(vf_netdev, net); 102 dev_mc_sync(vf_netdev, net);
98 } 103 }
99 104
100 rndis_filter_update(nvdev); 105 nvdev = rcu_dereference(ndev_ctx->nvdev);
106 if (nvdev)
107 rndis_filter_update(nvdev);
108 rcu_read_unlock();
101} 109}
102 110
103static int netvsc_open(struct net_device *net) 111static int netvsc_open(struct net_device *net)
@@ -118,10 +126,8 @@ static int netvsc_open(struct net_device *net)
118 } 126 }
119 127
120 rdev = nvdev->extension; 128 rdev = nvdev->extension;
121 if (!rdev->link_state) { 129 if (!rdev->link_state)
122 netif_carrier_on(net); 130 netif_carrier_on(net);
123 netif_tx_wake_all_queues(net);
124 }
125 131
126 if (vf_netdev) { 132 if (vf_netdev) {
127 /* Setting synthetic device up transparently sets 133 /* Setting synthetic device up transparently sets
@@ -137,36 +143,25 @@ static int netvsc_open(struct net_device *net)
137 return 0; 143 return 0;
138} 144}
139 145
140static int netvsc_close(struct net_device *net) 146static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
141{ 147{
142 struct net_device_context *net_device_ctx = netdev_priv(net); 148 unsigned int retry = 0;
143 struct net_device *vf_netdev 149 int i;
144 = rtnl_dereference(net_device_ctx->vf_netdev);
145 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
146 int ret = 0;
147 u32 aread, i, msec = 10, retry = 0, retry_max = 20;
148 struct vmbus_channel *chn;
149
150 netif_tx_disable(net);
151
152 /* No need to close rndis filter if it is removed already */
153 if (!nvdev)
154 goto out;
155
156 ret = rndis_filter_close(nvdev);
157 if (ret != 0) {
158 netdev_err(net, "unable to close device (ret %d).\n", ret);
159 return ret;
160 }
161 150
162 /* Ensure pending bytes in ring are read */ 151 /* Ensure pending bytes in ring are read */
163 while (true) { 152 for (;;) {
164 aread = 0; 153 u32 aread = 0;
154
165 for (i = 0; i < nvdev->num_chn; i++) { 155 for (i = 0; i < nvdev->num_chn; i++) {
166 chn = nvdev->chan_table[i].channel; 156 struct vmbus_channel *chn
157 = nvdev->chan_table[i].channel;
158
167 if (!chn) 159 if (!chn)
168 continue; 160 continue;
169 161
162 /* make sure receive not running now */
163 napi_synchronize(&nvdev->chan_table[i].napi);
164
170 aread = hv_get_bytes_to_read(&chn->inbound); 165 aread = hv_get_bytes_to_read(&chn->inbound);
171 if (aread) 166 if (aread)
172 break; 167 break;
@@ -176,22 +171,40 @@ static int netvsc_close(struct net_device *net)
176 break; 171 break;
177 } 172 }
178 173
179 retry++; 174 if (aread == 0)
180 if (retry > retry_max || aread == 0) 175 return 0;
181 break;
182 176
183 msleep(msec); 177 if (++retry > RETRY_MAX)
178 return -ETIMEDOUT;
184 179
185 if (msec < 1000) 180 usleep_range(RETRY_US_LO, RETRY_US_HI);
186 msec *= 2;
187 } 181 }
182}
188 183
189 if (aread) { 184static int netvsc_close(struct net_device *net)
190 netdev_err(net, "Ring buffer not empty after closing rndis\n"); 185{
191 ret = -ETIMEDOUT; 186 struct net_device_context *net_device_ctx = netdev_priv(net);
187 struct net_device *vf_netdev
188 = rtnl_dereference(net_device_ctx->vf_netdev);
189 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
190 int ret;
191
192 netif_tx_disable(net);
193
194 /* No need to close rndis filter if it is removed already */
195 if (!nvdev)
196 return 0;
197
198 ret = rndis_filter_close(nvdev);
199 if (ret != 0) {
200 netdev_err(net, "unable to close device (ret %d).\n", ret);
201 return ret;
192 } 202 }
193 203
194out: 204 ret = netvsc_wait_until_empty(nvdev);
205 if (ret)
206 netdev_err(net, "Ring buffer not empty after closing rndis\n");
207
195 if (vf_netdev) 208 if (vf_netdev)
196 dev_close(vf_netdev); 209 dev_close(vf_netdev);
197 210
@@ -840,16 +853,81 @@ static void netvsc_get_channels(struct net_device *net,
840 } 853 }
841} 854}
842 855
856static int netvsc_detach(struct net_device *ndev,
857 struct netvsc_device *nvdev)
858{
859 struct net_device_context *ndev_ctx = netdev_priv(ndev);
860 struct hv_device *hdev = ndev_ctx->device_ctx;
861 int ret;
862
863 /* Don't try continuing to try and setup sub channels */
864 if (cancel_work_sync(&nvdev->subchan_work))
865 nvdev->num_chn = 1;
866
867 /* If device was up (receiving) then shutdown */
868 if (netif_running(ndev)) {
869 netif_tx_disable(ndev);
870
871 ret = rndis_filter_close(nvdev);
872 if (ret) {
873 netdev_err(ndev,
874 "unable to close device (ret %d).\n", ret);
875 return ret;
876 }
877
878 ret = netvsc_wait_until_empty(nvdev);
879 if (ret) {
880 netdev_err(ndev,
881 "Ring buffer not empty after closing rndis\n");
882 return ret;
883 }
884 }
885
886 netif_device_detach(ndev);
887
888 rndis_filter_device_remove(hdev, nvdev);
889
890 return 0;
891}
892
893static int netvsc_attach(struct net_device *ndev,
894 struct netvsc_device_info *dev_info)
895{
896 struct net_device_context *ndev_ctx = netdev_priv(ndev);
897 struct hv_device *hdev = ndev_ctx->device_ctx;
898 struct netvsc_device *nvdev;
899 struct rndis_device *rdev;
900 int ret;
901
902 nvdev = rndis_filter_device_add(hdev, dev_info);
903 if (IS_ERR(nvdev))
904 return PTR_ERR(nvdev);
905
906 /* Note: enable and attach happen when sub-channels setup */
907
908 netif_carrier_off(ndev);
909
910 if (netif_running(ndev)) {
911 ret = rndis_filter_open(nvdev);
912 if (ret)
913 return ret;
914
915 rdev = nvdev->extension;
916 if (!rdev->link_state)
917 netif_carrier_on(ndev);
918 }
919
920 return 0;
921}
922
843static int netvsc_set_channels(struct net_device *net, 923static int netvsc_set_channels(struct net_device *net,
844 struct ethtool_channels *channels) 924 struct ethtool_channels *channels)
845{ 925{
846 struct net_device_context *net_device_ctx = netdev_priv(net); 926 struct net_device_context *net_device_ctx = netdev_priv(net);
847 struct hv_device *dev = net_device_ctx->device_ctx;
848 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 927 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
849 unsigned int orig, count = channels->combined_count; 928 unsigned int orig, count = channels->combined_count;
850 struct netvsc_device_info device_info; 929 struct netvsc_device_info device_info;
851 bool was_opened; 930 int ret;
852 int ret = 0;
853 931
854 /* We do not support separate count for rx, tx, or other */ 932 /* We do not support separate count for rx, tx, or other */
855 if (count == 0 || 933 if (count == 0 ||
@@ -866,9 +944,6 @@ static int netvsc_set_channels(struct net_device *net,
866 return -EINVAL; 944 return -EINVAL;
867 945
868 orig = nvdev->num_chn; 946 orig = nvdev->num_chn;
869 was_opened = rndis_filter_opened(nvdev);
870 if (was_opened)
871 rndis_filter_close(nvdev);
872 947
873 memset(&device_info, 0, sizeof(device_info)); 948 memset(&device_info, 0, sizeof(device_info));
874 device_info.num_chn = count; 949 device_info.num_chn = count;
@@ -877,28 +952,17 @@ static int netvsc_set_channels(struct net_device *net,
877 device_info.recv_sections = nvdev->recv_section_cnt; 952 device_info.recv_sections = nvdev->recv_section_cnt;
878 device_info.recv_section_size = nvdev->recv_section_size; 953 device_info.recv_section_size = nvdev->recv_section_size;
879 954
880 rndis_filter_device_remove(dev, nvdev); 955 ret = netvsc_detach(net, nvdev);
956 if (ret)
957 return ret;
881 958
882 nvdev = rndis_filter_device_add(dev, &device_info); 959 ret = netvsc_attach(net, &device_info);
883 if (IS_ERR(nvdev)) { 960 if (ret) {
884 ret = PTR_ERR(nvdev);
885 device_info.num_chn = orig; 961 device_info.num_chn = orig;
886 nvdev = rndis_filter_device_add(dev, &device_info); 962 if (netvsc_attach(net, &device_info))
887 963 netdev_err(net, "restoring channel setting failed\n");
888 if (IS_ERR(nvdev)) {
889 netdev_err(net, "restoring channel setting failed: %ld\n",
890 PTR_ERR(nvdev));
891 return ret;
892 }
893 } 964 }
894 965
895 if (was_opened)
896 rndis_filter_open(nvdev);
897
898 /* We may have missed link change notifications */
899 net_device_ctx->last_reconfig = 0;
900 schedule_delayed_work(&net_device_ctx->dwork, 0);
901
902 return ret; 966 return ret;
903} 967}
904 968
@@ -964,10 +1028,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
964 struct net_device_context *ndevctx = netdev_priv(ndev); 1028 struct net_device_context *ndevctx = netdev_priv(ndev);
965 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1029 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
966 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1030 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
967 struct hv_device *hdev = ndevctx->device_ctx;
968 int orig_mtu = ndev->mtu; 1031 int orig_mtu = ndev->mtu;
969 struct netvsc_device_info device_info; 1032 struct netvsc_device_info device_info;
970 bool was_opened;
971 int ret = 0; 1033 int ret = 0;
972 1034
973 if (!nvdev || nvdev->destroy) 1035 if (!nvdev || nvdev->destroy)
@@ -980,11 +1042,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
980 return ret; 1042 return ret;
981 } 1043 }
982 1044
983 netif_device_detach(ndev);
984 was_opened = rndis_filter_opened(nvdev);
985 if (was_opened)
986 rndis_filter_close(nvdev);
987
988 memset(&device_info, 0, sizeof(device_info)); 1045 memset(&device_info, 0, sizeof(device_info));
989 device_info.num_chn = nvdev->num_chn; 1046 device_info.num_chn = nvdev->num_chn;
990 device_info.send_sections = nvdev->send_section_cnt; 1047 device_info.send_sections = nvdev->send_section_cnt;
@@ -992,35 +1049,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
992 device_info.recv_sections = nvdev->recv_section_cnt; 1049 device_info.recv_sections = nvdev->recv_section_cnt;
993 device_info.recv_section_size = nvdev->recv_section_size; 1050 device_info.recv_section_size = nvdev->recv_section_size;
994 1051
995 rndis_filter_device_remove(hdev, nvdev); 1052 ret = netvsc_detach(ndev, nvdev);
1053 if (ret)
1054 goto rollback_vf;
996 1055
997 ndev->mtu = mtu; 1056 ndev->mtu = mtu;
998 1057
999 nvdev = rndis_filter_device_add(hdev, &device_info); 1058 ret = netvsc_attach(ndev, &device_info);
1000 if (IS_ERR(nvdev)) { 1059 if (ret)
1001 ret = PTR_ERR(nvdev); 1060 goto rollback;
1002
1003 /* Attempt rollback to original MTU */
1004 ndev->mtu = orig_mtu;
1005 nvdev = rndis_filter_device_add(hdev, &device_info);
1006
1007 if (vf_netdev)
1008 dev_set_mtu(vf_netdev, orig_mtu);
1009
1010 if (IS_ERR(nvdev)) {
1011 netdev_err(ndev, "restoring mtu failed: %ld\n",
1012 PTR_ERR(nvdev));
1013 return ret;
1014 }
1015 }
1016 1061
1017 if (was_opened) 1062 return 0;
1018 rndis_filter_open(nvdev);
1019 1063
1020 netif_device_attach(ndev); 1064rollback:
1065 /* Attempt rollback to original MTU */
1066 ndev->mtu = orig_mtu;
1021 1067
1022 /* We may have missed link change notifications */ 1068 if (netvsc_attach(ndev, &device_info))
1023 schedule_delayed_work(&ndevctx->dwork, 0); 1069 netdev_err(ndev, "restoring mtu failed\n");
1070rollback_vf:
1071 if (vf_netdev)
1072 dev_set_mtu(vf_netdev, orig_mtu);
1024 1073
1025 return ret; 1074 return ret;
1026} 1075}
@@ -1526,11 +1575,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1526{ 1575{
1527 struct net_device_context *ndevctx = netdev_priv(ndev); 1576 struct net_device_context *ndevctx = netdev_priv(ndev);
1528 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1577 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1529 struct hv_device *hdev = ndevctx->device_ctx;
1530 struct netvsc_device_info device_info; 1578 struct netvsc_device_info device_info;
1531 struct ethtool_ringparam orig; 1579 struct ethtool_ringparam orig;
1532 u32 new_tx, new_rx; 1580 u32 new_tx, new_rx;
1533 bool was_opened;
1534 int ret = 0; 1581 int ret = 0;
1535 1582
1536 if (!nvdev || nvdev->destroy) 1583 if (!nvdev || nvdev->destroy)
@@ -1555,34 +1602,18 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1555 device_info.recv_sections = new_rx; 1602 device_info.recv_sections = new_rx;
1556 device_info.recv_section_size = nvdev->recv_section_size; 1603 device_info.recv_section_size = nvdev->recv_section_size;
1557 1604
1558 netif_device_detach(ndev); 1605 ret = netvsc_detach(ndev, nvdev);
1559 was_opened = rndis_filter_opened(nvdev); 1606 if (ret)
1560 if (was_opened) 1607 return ret;
1561 rndis_filter_close(nvdev);
1562
1563 rndis_filter_device_remove(hdev, nvdev);
1564
1565 nvdev = rndis_filter_device_add(hdev, &device_info);
1566 if (IS_ERR(nvdev)) {
1567 ret = PTR_ERR(nvdev);
1568 1608
1609 ret = netvsc_attach(ndev, &device_info);
1610 if (ret) {
1569 device_info.send_sections = orig.tx_pending; 1611 device_info.send_sections = orig.tx_pending;
1570 device_info.recv_sections = orig.rx_pending; 1612 device_info.recv_sections = orig.rx_pending;
1571 nvdev = rndis_filter_device_add(hdev, &device_info);
1572 if (IS_ERR(nvdev)) {
1573 netdev_err(ndev, "restoring ringparam failed: %ld\n",
1574 PTR_ERR(nvdev));
1575 return ret;
1576 }
1577 }
1578
1579 if (was_opened)
1580 rndis_filter_open(nvdev);
1581 netif_device_attach(ndev);
1582 1613
1583 /* We may have missed link change notifications */ 1614 if (netvsc_attach(ndev, &device_info))
1584 ndevctx->last_reconfig = 0; 1615 netdev_err(ndev, "restoring ringparam failed");
1585 schedule_delayed_work(&ndevctx->dwork, 0); 1616 }
1586 1617
1587 return ret; 1618 return ret;
1588} 1619}
@@ -1846,8 +1877,12 @@ static void __netvsc_vf_setup(struct net_device *ndev,
1846 1877
1847 /* set multicast etc flags on VF */ 1878 /* set multicast etc flags on VF */
1848 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); 1879 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
1880
1881 /* sync address list from ndev to VF */
1882 netif_addr_lock_bh(ndev);
1849 dev_uc_sync(vf_netdev, ndev); 1883 dev_uc_sync(vf_netdev, ndev);
1850 dev_mc_sync(vf_netdev, ndev); 1884 dev_mc_sync(vf_netdev, ndev);
1885 netif_addr_unlock_bh(ndev);
1851 1886
1852 if (netif_running(ndev)) { 1887 if (netif_running(ndev)) {
1853 ret = dev_open(vf_netdev); 1888 ret = dev_open(vf_netdev);
@@ -2063,8 +2098,8 @@ no_net:
2063static int netvsc_remove(struct hv_device *dev) 2098static int netvsc_remove(struct hv_device *dev)
2064{ 2099{
2065 struct net_device_context *ndev_ctx; 2100 struct net_device_context *ndev_ctx;
2066 struct net_device *vf_netdev; 2101 struct net_device *vf_netdev, *net;
2067 struct net_device *net; 2102 struct netvsc_device *nvdev;
2068 2103
2069 net = hv_get_drvdata(dev); 2104 net = hv_get_drvdata(dev);
2070 if (net == NULL) { 2105 if (net == NULL) {
@@ -2074,10 +2109,14 @@ static int netvsc_remove(struct hv_device *dev)
2074 2109
2075 ndev_ctx = netdev_priv(net); 2110 ndev_ctx = netdev_priv(net);
2076 2111
2077 netif_device_detach(net);
2078
2079 cancel_delayed_work_sync(&ndev_ctx->dwork); 2112 cancel_delayed_work_sync(&ndev_ctx->dwork);
2080 2113
2114 rcu_read_lock();
2115 nvdev = rcu_dereference(ndev_ctx->nvdev);
2116
2117 if (nvdev)
2118 cancel_work_sync(&nvdev->subchan_work);
2119
2081 /* 2120 /*
2082 * Call to the vsc driver to let it know that the device is being 2121 * Call to the vsc driver to let it know that the device is being
2083 * removed. Also blocks mtu and channel changes. 2122 * removed. Also blocks mtu and channel changes.
@@ -2087,11 +2126,13 @@ static int netvsc_remove(struct hv_device *dev)
2087 if (vf_netdev) 2126 if (vf_netdev)
2088 netvsc_unregister_vf(vf_netdev); 2127 netvsc_unregister_vf(vf_netdev);
2089 2128
2129 if (nvdev)
2130 rndis_filter_device_remove(dev, nvdev);
2131
2090 unregister_netdevice(net); 2132 unregister_netdevice(net);
2091 2133
2092 rndis_filter_device_remove(dev,
2093 rtnl_dereference(ndev_ctx->nvdev));
2094 rtnl_unlock(); 2134 rtnl_unlock();
2135 rcu_read_unlock();
2095 2136
2096 hv_set_drvdata(dev, NULL); 2137 hv_set_drvdata(dev, NULL);
2097 2138
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2dc00f714482..020f8bc54386 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -267,13 +267,23 @@ static void rndis_set_link_state(struct rndis_device *rdev,
267 } 267 }
268} 268}
269 269
270static void rndis_filter_receive_response(struct rndis_device *dev, 270static void rndis_filter_receive_response(struct net_device *ndev,
271 struct rndis_message *resp) 271 struct netvsc_device *nvdev,
272 const struct rndis_message *resp)
272{ 273{
274 struct rndis_device *dev = nvdev->extension;
273 struct rndis_request *request = NULL; 275 struct rndis_request *request = NULL;
274 bool found = false; 276 bool found = false;
275 unsigned long flags; 277 unsigned long flags;
276 struct net_device *ndev = dev->ndev; 278
279 /* This should never happen, it means control message
280 * response received after device removed.
281 */
282 if (dev->state == RNDIS_DEV_UNINITIALIZED) {
283 netdev_err(ndev,
284 "got rndis message uninitialized\n");
285 return;
286 }
277 287
278 spin_lock_irqsave(&dev->request_lock, flags); 288 spin_lock_irqsave(&dev->request_lock, flags);
279 list_for_each_entry(request, &dev->req_list, list_ent) { 289 list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -355,7 +365,6 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
355 365
356static int rndis_filter_receive_data(struct net_device *ndev, 366static int rndis_filter_receive_data(struct net_device *ndev,
357 struct netvsc_device *nvdev, 367 struct netvsc_device *nvdev,
358 struct rndis_device *dev,
359 struct rndis_message *msg, 368 struct rndis_message *msg,
360 struct vmbus_channel *channel, 369 struct vmbus_channel *channel,
361 void *data, u32 data_buflen) 370 void *data, u32 data_buflen)
@@ -375,7 +384,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
375 * should be the data packet size plus the trailer padding size 384 * should be the data packet size plus the trailer padding size
376 */ 385 */
377 if (unlikely(data_buflen < rndis_pkt->data_len)) { 386 if (unlikely(data_buflen < rndis_pkt->data_len)) {
378 netdev_err(dev->ndev, "rndis message buffer " 387 netdev_err(ndev, "rndis message buffer "
379 "overflow detected (got %u, min %u)" 388 "overflow detected (got %u, min %u)"
380 "...dropping this message!\n", 389 "...dropping this message!\n",
381 data_buflen, rndis_pkt->data_len); 390 data_buflen, rndis_pkt->data_len);
@@ -403,35 +412,20 @@ int rndis_filter_receive(struct net_device *ndev,
403 void *data, u32 buflen) 412 void *data, u32 buflen)
404{ 413{
405 struct net_device_context *net_device_ctx = netdev_priv(ndev); 414 struct net_device_context *net_device_ctx = netdev_priv(ndev);
406 struct rndis_device *rndis_dev = net_dev->extension;
407 struct rndis_message *rndis_msg = data; 415 struct rndis_message *rndis_msg = data;
408 416
409 /* Make sure the rndis device state is initialized */
410 if (unlikely(!rndis_dev)) {
411 netif_dbg(net_device_ctx, rx_err, ndev,
412 "got rndis message but no rndis device!\n");
413 return NVSP_STAT_FAIL;
414 }
415
416 if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
417 netif_dbg(net_device_ctx, rx_err, ndev,
418 "got rndis message uninitialized\n");
419 return NVSP_STAT_FAIL;
420 }
421
422 if (netif_msg_rx_status(net_device_ctx)) 417 if (netif_msg_rx_status(net_device_ctx))
423 dump_rndis_message(ndev, rndis_msg); 418 dump_rndis_message(ndev, rndis_msg);
424 419
425 switch (rndis_msg->ndis_msg_type) { 420 switch (rndis_msg->ndis_msg_type) {
426 case RNDIS_MSG_PACKET: 421 case RNDIS_MSG_PACKET:
427 return rndis_filter_receive_data(ndev, net_dev, 422 return rndis_filter_receive_data(ndev, net_dev, rndis_msg,
428 rndis_dev, rndis_msg,
429 channel, data, buflen); 423 channel, data, buflen);
430 case RNDIS_MSG_INIT_C: 424 case RNDIS_MSG_INIT_C:
431 case RNDIS_MSG_QUERY_C: 425 case RNDIS_MSG_QUERY_C:
432 case RNDIS_MSG_SET_C: 426 case RNDIS_MSG_SET_C:
433 /* completion msgs */ 427 /* completion msgs */
434 rndis_filter_receive_response(rndis_dev, rndis_msg); 428 rndis_filter_receive_response(ndev, net_dev, rndis_msg);
435 break; 429 break;
436 430
437 case RNDIS_MSG_INDICATE: 431 case RNDIS_MSG_INDICATE:
@@ -828,13 +822,15 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
828 struct rndis_set_request *set; 822 struct rndis_set_request *set;
829 int ret; 823 int ret;
830 824
825 if (dev->filter == new_filter)
826 return 0;
827
831 request = get_rndis_request(dev, RNDIS_MSG_SET, 828 request = get_rndis_request(dev, RNDIS_MSG_SET,
832 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + 829 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
833 sizeof(u32)); 830 sizeof(u32));
834 if (!request) 831 if (!request)
835 return -ENOMEM; 832 return -ENOMEM;
836 833
837
838 /* Setup the rndis set */ 834 /* Setup the rndis set */
839 set = &request->request_msg.msg.set_req; 835 set = &request->request_msg.msg.set_req;
840 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; 836 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
@@ -845,8 +841,10 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
845 &new_filter, sizeof(u32)); 841 &new_filter, sizeof(u32));
846 842
847 ret = rndis_filter_send_request(dev, request); 843 ret = rndis_filter_send_request(dev, request);
848 if (ret == 0) 844 if (ret == 0) {
849 wait_for_completion(&request->wait_event); 845 wait_for_completion(&request->wait_event);
846 dev->filter = new_filter;
847 }
850 848
851 put_rndis_request(dev, request); 849 put_rndis_request(dev, request);
852 850
@@ -864,9 +862,9 @@ static void rndis_set_multicast(struct work_struct *w)
864 filter = NDIS_PACKET_TYPE_PROMISCUOUS; 862 filter = NDIS_PACKET_TYPE_PROMISCUOUS;
865 } else { 863 } else {
866 if (flags & IFF_ALLMULTI) 864 if (flags & IFF_ALLMULTI)
867 flags |= NDIS_PACKET_TYPE_ALL_MULTICAST; 865 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
868 if (flags & IFF_BROADCAST) 866 if (flags & IFF_BROADCAST)
869 flags |= NDIS_PACKET_TYPE_BROADCAST; 867 filter |= NDIS_PACKET_TYPE_BROADCAST;
870 } 868 }
871 869
872 rndis_filter_set_packet_filter(rdev, filter); 870 rndis_filter_set_packet_filter(rdev, filter);
@@ -1124,6 +1122,7 @@ void rndis_set_subchannel(struct work_struct *w)
1124 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) 1122 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1125 ndev_ctx->tx_table[i] = i % nvdev->num_chn; 1123 ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1126 1124
1125 netif_device_attach(ndev);
1127 rtnl_unlock(); 1126 rtnl_unlock();
1128 return; 1127 return;
1129 1128
@@ -1134,6 +1133,8 @@ failed:
1134 1133
1135 nvdev->max_chn = 1; 1134 nvdev->max_chn = 1;
1136 nvdev->num_chn = 1; 1135 nvdev->num_chn = 1;
1136
1137 netif_device_attach(ndev);
1137unlock: 1138unlock:
1138 rtnl_unlock(); 1139 rtnl_unlock();
1139} 1140}
@@ -1336,6 +1337,10 @@ out:
1336 net_device->num_chn = 1; 1337 net_device->num_chn = 1;
1337 } 1338 }
1338 1339
1340 /* No sub channels, device is ready */
1341 if (net_device->num_chn == 1)
1342 netif_device_attach(net);
1343
1339 return net_device; 1344 return net_device;
1340 1345
1341err_dev_remv: 1346err_dev_remv:
@@ -1348,16 +1353,12 @@ void rndis_filter_device_remove(struct hv_device *dev,
1348{ 1353{
1349 struct rndis_device *rndis_dev = net_dev->extension; 1354 struct rndis_device *rndis_dev = net_dev->extension;
1350 1355
1351 /* Don't try and setup sub channels if about to halt */
1352 cancel_work_sync(&net_dev->subchan_work);
1353
1354 /* Halt and release the rndis device */ 1356 /* Halt and release the rndis device */
1355 rndis_filter_halt_device(net_dev, rndis_dev); 1357 rndis_filter_halt_device(net_dev, rndis_dev);
1356 1358
1357 net_dev->extension = NULL; 1359 net_dev->extension = NULL;
1358 1360
1359 netvsc_device_remove(dev); 1361 netvsc_device_remove(dev);
1360 kfree(rndis_dev);
1361} 1362}
1362 1363
1363int rndis_filter_open(struct netvsc_device *nvdev) 1364int rndis_filter_open(struct netvsc_device *nvdev)
@@ -1375,10 +1376,3 @@ int rndis_filter_close(struct netvsc_device *nvdev)
1375 1376
1376 return rndis_filter_close_device(nvdev->extension); 1377 return rndis_filter_close_device(nvdev->extension);
1377} 1378}
1378
1379bool rndis_filter_opened(const struct netvsc_device *nvdev)
1380{
1381 const struct rndis_device *dev = nvdev->extension;
1382
1383 return dev->state == RNDIS_DEV_DATAINITIALIZED;
1384}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7de88b33d5b9..9cbb0c8a896a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3277 3277
3278 err = netdev_upper_dev_link(real_dev, dev, extack); 3278 err = netdev_upper_dev_link(real_dev, dev, extack);
3279 if (err < 0) 3279 if (err < 0)
3280 goto unregister; 3280 goto put_dev;
3281 3281
3282 /* need to be already registered so that ->init has run and 3282 /* need to be already registered so that ->init has run and
3283 * the MAC addr is set 3283 * the MAC addr is set
@@ -3316,7 +3316,8 @@ del_dev:
3316 macsec_del_dev(macsec); 3316 macsec_del_dev(macsec);
3317unlink: 3317unlink:
3318 netdev_upper_dev_unlink(real_dev, dev); 3318 netdev_upper_dev_unlink(real_dev, dev);
3319unregister: 3319put_dev:
3320 dev_put(real_dev);
3320 unregister_netdevice(dev); 3321 unregister_netdevice(dev);
3321 return err; 3322 return err;
3322} 3323}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8fc02d9db3d0..725f4b4afc6d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1036,7 +1036,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
1036 lowerdev_features &= (features | ~NETIF_F_LRO); 1036 lowerdev_features &= (features | ~NETIF_F_LRO);
1037 features = netdev_increment_features(lowerdev_features, features, mask); 1037 features = netdev_increment_features(lowerdev_features, features, mask);
1038 features |= ALWAYS_ON_FEATURES; 1038 features |= ALWAYS_ON_FEATURES;
1039 features &= ~NETIF_F_NETNS_LOCAL; 1039 features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);
1040 1040
1041 return features; 1041 return features;
1042} 1042}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 171010eb4d9c..5ad130c3da43 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -341,8 +341,8 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
341 unsigned int i; 341 unsigned int i;
342 342
343 for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++) 343 for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
344 memcpy(data + i * ETH_GSTRING_LEN, 344 strlcpy(data + i * ETH_GSTRING_LEN,
345 bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN); 345 bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
346} 346}
347EXPORT_SYMBOL_GPL(bcm_phy_get_strings); 347EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
348 348
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 98fd6b7ceeec..a75c511950c3 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1452,8 +1452,8 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
1452 int i; 1452 int i;
1453 1453
1454 for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { 1454 for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
1455 memcpy(data + i * ETH_GSTRING_LEN, 1455 strlcpy(data + i * ETH_GSTRING_LEN,
1456 marvell_hw_stats[i].string, ETH_GSTRING_LEN); 1456 marvell_hw_stats[i].string, ETH_GSTRING_LEN);
1457 } 1457 }
1458} 1458}
1459 1459
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0f45310300f6..f41b224a9cdb 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -635,25 +635,6 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
635 return 0; 635 return 0;
636} 636}
637 637
638/* This routine returns -1 as an indication to the caller that the
639 * Micrel ksz9021 10/100/1000 PHY does not support standard IEEE
640 * MMD extended PHY registers.
641 */
642static int
643ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum)
644{
645 return -1;
646}
647
648/* This routine does nothing since the Micrel ksz9021 does not support
649 * standard IEEE MMD extended PHY registers.
650 */
651static int
652ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val)
653{
654 return -1;
655}
656
657static int kszphy_get_sset_count(struct phy_device *phydev) 638static int kszphy_get_sset_count(struct phy_device *phydev)
658{ 639{
659 return ARRAY_SIZE(kszphy_hw_stats); 640 return ARRAY_SIZE(kszphy_hw_stats);
@@ -664,8 +645,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
664 int i; 645 int i;
665 646
666 for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { 647 for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
667 memcpy(data + i * ETH_GSTRING_LEN, 648 strlcpy(data + i * ETH_GSTRING_LEN,
668 kszphy_hw_stats[i].string, ETH_GSTRING_LEN); 649 kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
669 } 650 }
670} 651}
671 652
@@ -946,8 +927,8 @@ static struct phy_driver ksphy_driver[] = {
946 .get_stats = kszphy_get_stats, 927 .get_stats = kszphy_get_stats,
947 .suspend = genphy_suspend, 928 .suspend = genphy_suspend,
948 .resume = genphy_resume, 929 .resume = genphy_resume,
949 .read_mmd = ksz9021_rd_mmd_phyreg, 930 .read_mmd = genphy_read_mmd_unsupported,
950 .write_mmd = ksz9021_wr_mmd_phyreg, 931 .write_mmd = genphy_write_mmd_unsupported,
951}, { 932}, {
952 .phy_id = PHY_ID_KSZ9031, 933 .phy_id = PHY_ID_KSZ9031,
953 .phy_id_mask = MICREL_PHY_ID_MASK, 934 .phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c2d9027be863..05c1e8ef15e6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -618,6 +618,68 @@ static void phy_error(struct phy_device *phydev)
618} 618}
619 619
620/** 620/**
621 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
622 * @phydev: target phy_device struct
623 */
624static int phy_disable_interrupts(struct phy_device *phydev)
625{
626 int err;
627
628 /* Disable PHY interrupts */
629 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
630 if (err)
631 return err;
632
633 /* Clear the interrupt */
634 return phy_clear_interrupt(phydev);
635}
636
637/**
638 * phy_change - Called by the phy_interrupt to handle PHY changes
639 * @phydev: phy_device struct that interrupted
640 */
641static irqreturn_t phy_change(struct phy_device *phydev)
642{
643 if (phy_interrupt_is_valid(phydev)) {
644 if (phydev->drv->did_interrupt &&
645 !phydev->drv->did_interrupt(phydev))
646 return IRQ_NONE;
647
648 if (phydev->state == PHY_HALTED)
649 if (phy_disable_interrupts(phydev))
650 goto phy_err;
651 }
652
653 mutex_lock(&phydev->lock);
654 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
655 phydev->state = PHY_CHANGELINK;
656 mutex_unlock(&phydev->lock);
657
658 /* reschedule state queue work to run as soon as possible */
659 phy_trigger_machine(phydev, true);
660
661 if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
662 goto phy_err;
663 return IRQ_HANDLED;
664
665phy_err:
666 phy_error(phydev);
667 return IRQ_NONE;
668}
669
670/**
671 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
672 * @work: work_struct that describes the work to be done
673 */
674void phy_change_work(struct work_struct *work)
675{
676 struct phy_device *phydev =
677 container_of(work, struct phy_device, phy_queue);
678
679 phy_change(phydev);
680}
681
682/**
621 * phy_interrupt - PHY interrupt handler 683 * phy_interrupt - PHY interrupt handler
622 * @irq: interrupt line 684 * @irq: interrupt line
623 * @phy_dat: phy_device pointer 685 * @phy_dat: phy_device pointer
@@ -632,9 +694,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
632 if (PHY_HALTED == phydev->state) 694 if (PHY_HALTED == phydev->state)
633 return IRQ_NONE; /* It can't be ours. */ 695 return IRQ_NONE; /* It can't be ours. */
634 696
635 phy_change(phydev); 697 return phy_change(phydev);
636
637 return IRQ_HANDLED;
638} 698}
639 699
640/** 700/**
@@ -652,23 +712,6 @@ static int phy_enable_interrupts(struct phy_device *phydev)
652} 712}
653 713
654/** 714/**
655 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
656 * @phydev: target phy_device struct
657 */
658static int phy_disable_interrupts(struct phy_device *phydev)
659{
660 int err;
661
662 /* Disable PHY interrupts */
663 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
664 if (err)
665 return err;
666
667 /* Clear the interrupt */
668 return phy_clear_interrupt(phydev);
669}
670
671/**
672 * phy_start_interrupts - request and enable interrupts for a PHY device 715 * phy_start_interrupts - request and enable interrupts for a PHY device
673 * @phydev: target phy_device struct 716 * @phydev: target phy_device struct
674 * 717 *
@@ -711,50 +754,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
711EXPORT_SYMBOL(phy_stop_interrupts); 754EXPORT_SYMBOL(phy_stop_interrupts);
712 755
713/** 756/**
714 * phy_change - Called by the phy_interrupt to handle PHY changes
715 * @phydev: phy_device struct that interrupted
716 */
717void phy_change(struct phy_device *phydev)
718{
719 if (phy_interrupt_is_valid(phydev)) {
720 if (phydev->drv->did_interrupt &&
721 !phydev->drv->did_interrupt(phydev))
722 return;
723
724 if (phydev->state == PHY_HALTED)
725 if (phy_disable_interrupts(phydev))
726 goto phy_err;
727 }
728
729 mutex_lock(&phydev->lock);
730 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
731 phydev->state = PHY_CHANGELINK;
732 mutex_unlock(&phydev->lock);
733
734 /* reschedule state queue work to run as soon as possible */
735 phy_trigger_machine(phydev, true);
736
737 if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
738 goto phy_err;
739 return;
740
741phy_err:
742 phy_error(phydev);
743}
744
745/**
746 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
747 * @work: work_struct that describes the work to be done
748 */
749void phy_change_work(struct work_struct *work)
750{
751 struct phy_device *phydev =
752 container_of(work, struct phy_device, phy_queue);
753
754 phy_change(phydev);
755}
756
757/**
758 * phy_stop - Bring down the PHY link, and stop checking the status 757 * phy_stop - Bring down the PHY link, and stop checking the status
759 * @phydev: target phy_device struct 758 * @phydev: target phy_device struct
760 */ 759 */
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b285323327c4..ac23322a32e1 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1012,10 +1012,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
1012 err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, 1012 err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
1013 "attached_dev"); 1013 "attached_dev");
1014 if (!err) { 1014 if (!err) {
1015 err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, 1015 err = sysfs_create_link_nowarn(&dev->dev.kobj,
1016 "phydev"); 1016 &phydev->mdio.dev.kobj,
1017 if (err) 1017 "phydev");
1018 goto error; 1018 if (err) {
1019 dev_err(&dev->dev, "could not add device link to %s err %d\n",
1020 kobject_name(&phydev->mdio.dev.kobj),
1021 err);
1022 /* non-fatal - some net drivers can use one netdevice
1023 * with more then one phy
1024 */
1025 }
1019 1026
1020 phydev->sysfs_links = true; 1027 phydev->sysfs_links = true;
1021 } 1028 }
@@ -1666,6 +1673,23 @@ int genphy_config_init(struct phy_device *phydev)
1666} 1673}
1667EXPORT_SYMBOL(genphy_config_init); 1674EXPORT_SYMBOL(genphy_config_init);
1668 1675
1676/* This is used for the phy device which doesn't support the MMD extended
1677 * register access, but it does have side effect when we are trying to access
1678 * the MMD register via indirect method.
1679 */
1680int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum)
1681{
1682 return -EOPNOTSUPP;
1683}
1684EXPORT_SYMBOL(genphy_read_mmd_unsupported);
1685
1686int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
1687 u16 regnum, u16 val)
1688{
1689 return -EOPNOTSUPP;
1690}
1691EXPORT_SYMBOL(genphy_write_mmd_unsupported);
1692
1669int genphy_suspend(struct phy_device *phydev) 1693int genphy_suspend(struct phy_device *phydev)
1670{ 1694{
1671 return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); 1695 return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index ee3ca4a2f12b..9f48ecf9c627 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -172,6 +172,8 @@ static struct phy_driver realtek_drvs[] = {
172 .flags = PHY_HAS_INTERRUPT, 172 .flags = PHY_HAS_INTERRUPT,
173 .ack_interrupt = &rtl821x_ack_interrupt, 173 .ack_interrupt = &rtl821x_ack_interrupt,
174 .config_intr = &rtl8211b_config_intr, 174 .config_intr = &rtl8211b_config_intr,
175 .read_mmd = &genphy_read_mmd_unsupported,
176 .write_mmd = &genphy_write_mmd_unsupported,
175 }, { 177 }, {
176 .phy_id = 0x001cc914, 178 .phy_id = 0x001cc914,
177 .name = "RTL8211DN Gigabit Ethernet", 179 .name = "RTL8211DN Gigabit Ethernet",
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 7dc2f34e7229..926c2c322d43 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -257,7 +257,7 @@ struct ppp_net {
257/* Prototypes. */ 257/* Prototypes. */
258static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 258static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
259 struct file *file, unsigned int cmd, unsigned long arg); 259 struct file *file, unsigned int cmd, unsigned long arg);
260static void ppp_xmit_process(struct ppp *ppp); 260static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
261static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 261static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
262static void ppp_push(struct ppp *ppp); 262static void ppp_push(struct ppp *ppp);
263static void ppp_channel_push(struct channel *pch); 263static void ppp_channel_push(struct channel *pch);
@@ -513,13 +513,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
513 goto out; 513 goto out;
514 } 514 }
515 515
516 skb_queue_tail(&pf->xq, skb);
517
518 switch (pf->kind) { 516 switch (pf->kind) {
519 case INTERFACE: 517 case INTERFACE:
520 ppp_xmit_process(PF_TO_PPP(pf)); 518 ppp_xmit_process(PF_TO_PPP(pf), skb);
521 break; 519 break;
522 case CHANNEL: 520 case CHANNEL:
521 skb_queue_tail(&pf->xq, skb);
523 ppp_channel_push(PF_TO_CHANNEL(pf)); 522 ppp_channel_push(PF_TO_CHANNEL(pf));
524 break; 523 break;
525 } 524 }
@@ -1268,8 +1267,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
1268 put_unaligned_be16(proto, pp); 1267 put_unaligned_be16(proto, pp);
1269 1268
1270 skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); 1269 skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
1271 skb_queue_tail(&ppp->file.xq, skb); 1270 ppp_xmit_process(ppp, skb);
1272 ppp_xmit_process(ppp); 1271
1273 return NETDEV_TX_OK; 1272 return NETDEV_TX_OK;
1274 1273
1275 outf: 1274 outf:
@@ -1421,13 +1420,14 @@ static void ppp_setup(struct net_device *dev)
1421 */ 1420 */
1422 1421
1423/* Called to do any work queued up on the transmit side that can now be done */ 1422/* Called to do any work queued up on the transmit side that can now be done */
1424static void __ppp_xmit_process(struct ppp *ppp) 1423static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1425{ 1424{
1426 struct sk_buff *skb;
1427
1428 ppp_xmit_lock(ppp); 1425 ppp_xmit_lock(ppp);
1429 if (!ppp->closing) { 1426 if (!ppp->closing) {
1430 ppp_push(ppp); 1427 ppp_push(ppp);
1428
1429 if (skb)
1430 skb_queue_tail(&ppp->file.xq, skb);
1431 while (!ppp->xmit_pending && 1431 while (!ppp->xmit_pending &&
1432 (skb = skb_dequeue(&ppp->file.xq))) 1432 (skb = skb_dequeue(&ppp->file.xq)))
1433 ppp_send_frame(ppp, skb); 1433 ppp_send_frame(ppp, skb);
@@ -1441,7 +1441,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
1441 ppp_xmit_unlock(ppp); 1441 ppp_xmit_unlock(ppp);
1442} 1442}
1443 1443
1444static void ppp_xmit_process(struct ppp *ppp) 1444static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1445{ 1445{
1446 local_bh_disable(); 1446 local_bh_disable();
1447 1447
@@ -1449,7 +1449,7 @@ static void ppp_xmit_process(struct ppp *ppp)
1449 goto err; 1449 goto err;
1450 1450
1451 (*this_cpu_ptr(ppp->xmit_recursion))++; 1451 (*this_cpu_ptr(ppp->xmit_recursion))++;
1452 __ppp_xmit_process(ppp); 1452 __ppp_xmit_process(ppp, skb);
1453 (*this_cpu_ptr(ppp->xmit_recursion))--; 1453 (*this_cpu_ptr(ppp->xmit_recursion))--;
1454 1454
1455 local_bh_enable(); 1455 local_bh_enable();
@@ -1459,6 +1459,8 @@ static void ppp_xmit_process(struct ppp *ppp)
1459err: 1459err:
1460 local_bh_enable(); 1460 local_bh_enable();
1461 1461
1462 kfree_skb(skb);
1463
1462 if (net_ratelimit()) 1464 if (net_ratelimit())
1463 netdev_err(ppp->dev, "recursion detected\n"); 1465 netdev_err(ppp->dev, "recursion detected\n");
1464} 1466}
@@ -1943,7 +1945,7 @@ static void __ppp_channel_push(struct channel *pch)
1943 if (skb_queue_empty(&pch->file.xq)) { 1945 if (skb_queue_empty(&pch->file.xq)) {
1944 ppp = pch->ppp; 1946 ppp = pch->ppp;
1945 if (ppp) 1947 if (ppp)
1946 __ppp_xmit_process(ppp); 1948 __ppp_xmit_process(ppp, NULL);
1947 } 1949 }
1948} 1950}
1949 1951
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 5dd781e65958..222093e878a8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2401,7 +2401,7 @@ send_done:
2401 if (!nlh) { 2401 if (!nlh) {
2402 err = __send_and_alloc_skb(&skb, team, portid, send_func); 2402 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2403 if (err) 2403 if (err)
2404 goto errout; 2404 return err;
2405 goto send_done; 2405 goto send_done;
2406 } 2406 }
2407 2407
@@ -2687,7 +2687,7 @@ send_done:
2687 if (!nlh) { 2687 if (!nlh) {
2688 err = __send_and_alloc_skb(&skb, team, portid, send_func); 2688 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2689 if (err) 2689 if (err)
2690 goto errout; 2690 return err;
2691 goto send_done; 2691 goto send_done;
2692 } 2692 }
2693 2693
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index baeafa004463..a1ba262f40ad 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -656,7 +656,7 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
656 return tun; 656 return tun;
657} 657}
658 658
659static void tun_ptr_free(void *ptr) 659void tun_ptr_free(void *ptr)
660{ 660{
661 if (!ptr) 661 if (!ptr)
662 return; 662 return;
@@ -668,6 +668,7 @@ static void tun_ptr_free(void *ptr)
668 __skb_array_destroy_skb(ptr); 668 __skb_array_destroy_skb(ptr);
669 } 669 }
670} 670}
671EXPORT_SYMBOL_GPL(tun_ptr_free);
671 672
672static void tun_queue_purge(struct tun_file *tfile) 673static void tun_queue_purge(struct tun_file *tfile)
673{ 674{
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8a22ff67b026..d9eea8cfe6cb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -315,6 +315,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)
315void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) 315void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
316{ 316{
317 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); 317 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
318 unsigned long flags;
318 int status; 319 int status;
319 320
320 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { 321 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
@@ -326,10 +327,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
326 if (skb->protocol == 0) 327 if (skb->protocol == 0)
327 skb->protocol = eth_type_trans (skb, dev->net); 328 skb->protocol = eth_type_trans (skb, dev->net);
328 329
329 u64_stats_update_begin(&stats64->syncp); 330 flags = u64_stats_update_begin_irqsave(&stats64->syncp);
330 stats64->rx_packets++; 331 stats64->rx_packets++;
331 stats64->rx_bytes += skb->len; 332 stats64->rx_bytes += skb->len;
332 u64_stats_update_end(&stats64->syncp); 333 u64_stats_update_end_irqrestore(&stats64->syncp, flags);
333 334
334 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", 335 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
335 skb->len + sizeof (struct ethhdr), skb->protocol); 336 skb->len + sizeof (struct ethhdr), skb->protocol);
@@ -1248,11 +1249,12 @@ static void tx_complete (struct urb *urb)
1248 1249
1249 if (urb->status == 0) { 1250 if (urb->status == 0) {
1250 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); 1251 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
1252 unsigned long flags;
1251 1253
1252 u64_stats_update_begin(&stats64->syncp); 1254 flags = u64_stats_update_begin_irqsave(&stats64->syncp);
1253 stats64->tx_packets += entry->packets; 1255 stats64->tx_packets += entry->packets;
1254 stats64->tx_bytes += entry->length; 1256 stats64->tx_bytes += entry->length;
1255 u64_stats_update_end(&stats64->syncp); 1257 u64_stats_update_end_irqrestore(&stats64->syncp, flags);
1256 } else { 1258 } else {
1257 dev->net->stats.tx_errors++; 1259 dev->net->stats.tx_errors++;
1258 1260
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 8b39c160743d..e04937f44f33 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -977,6 +977,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
977{ 977{
978 int ret; 978 int ret;
979 u32 count; 979 u32 count;
980 int num_pkts;
981 int tx_num_deferred;
980 unsigned long flags; 982 unsigned long flags;
981 struct vmxnet3_tx_ctx ctx; 983 struct vmxnet3_tx_ctx ctx;
982 union Vmxnet3_GenericDesc *gdesc; 984 union Vmxnet3_GenericDesc *gdesc;
@@ -1075,12 +1077,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1075#else 1077#else
1076 gdesc = ctx.sop_txd; 1078 gdesc = ctx.sop_txd;
1077#endif 1079#endif
1080 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1078 if (ctx.mss) { 1081 if (ctx.mss) {
1079 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; 1082 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1080 gdesc->txd.om = VMXNET3_OM_TSO; 1083 gdesc->txd.om = VMXNET3_OM_TSO;
1081 gdesc->txd.msscof = ctx.mss; 1084 gdesc->txd.msscof = ctx.mss;
1082 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len - 1085 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1083 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
1084 } else { 1086 } else {
1085 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1087 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1086 gdesc->txd.hlen = ctx.eth_ip_hdr_size; 1088 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -1091,8 +1093,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1091 gdesc->txd.om = 0; 1093 gdesc->txd.om = 0;
1092 gdesc->txd.msscof = 0; 1094 gdesc->txd.msscof = 0;
1093 } 1095 }
1094 le32_add_cpu(&tq->shared->txNumDeferred, 1); 1096 num_pkts = 1;
1095 } 1097 }
1098 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1099 tx_num_deferred += num_pkts;
1096 1100
1097 if (skb_vlan_tag_present(skb)) { 1101 if (skb_vlan_tag_present(skb)) {
1098 gdesc->txd.ti = 1; 1102 gdesc->txd.ti = 1;
@@ -1118,8 +1122,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1118 1122
1119 spin_unlock_irqrestore(&tq->tx_lock, flags); 1123 spin_unlock_irqrestore(&tq->tx_lock, flags);
1120 1124
1121 if (le32_to_cpu(tq->shared->txNumDeferred) >= 1125 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1122 le32_to_cpu(tq->shared->txThreshold)) {
1123 tq->shared->txNumDeferred = 0; 1126 tq->shared->txNumDeferred = 0;
1124 VMXNET3_WRITE_BAR0_REG(adapter, 1127 VMXNET3_WRITE_BAR0_REG(adapter,
1125 VMXNET3_REG_TXPROD + tq->qid * 8, 1128 VMXNET3_REG_TXPROD + tq->qid * 8,
@@ -1470,7 +1473,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1470 vmxnet3_rx_csum(adapter, skb, 1473 vmxnet3_rx_csum(adapter, skb,
1471 (union Vmxnet3_GenericDesc *)rcd); 1474 (union Vmxnet3_GenericDesc *)rcd);
1472 skb->protocol = eth_type_trans(skb, adapter->netdev); 1475 skb->protocol = eth_type_trans(skb, adapter->netdev);
1473 if (!rcd->tcp || !adapter->lro) 1476 if (!rcd->tcp ||
1477 !(adapter->netdev->features & NETIF_F_LRO))
1474 goto not_lro; 1478 goto not_lro;
1475 1479
1476 if (segCnt != 0 && mss != 0) { 1480 if (segCnt != 0 && mss != 0) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 5ba222920e80..59ec34052a65 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.11.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040b00 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
@@ -342,9 +342,6 @@ struct vmxnet3_adapter {
342 u8 __iomem *hw_addr1; /* for BAR 1 */ 342 u8 __iomem *hw_addr1; /* for BAR 1 */
343 u8 version; 343 u8 version;
344 344
345 bool rxcsum;
346 bool lro;
347
348#ifdef VMXNET3_RSS 345#ifdef VMXNET3_RSS
349 struct UPT1_RSSConf *rss_conf; 346 struct UPT1_RSSConf *rss_conf;
350 bool rss; 347 bool rss;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index e89e5ef2c2a4..f246e9ed4a81 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -729,6 +729,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
729 ieee80211_hw_set(hw, SPECTRUM_MGMT); 729 ieee80211_hw_set(hw, SPECTRUM_MGMT);
730 ieee80211_hw_set(hw, SIGNAL_DBM); 730 ieee80211_hw_set(hw, SIGNAL_DBM);
731 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 731 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
732 ieee80211_hw_set(hw, DOESNT_SUPPORT_QOS_NDP);
732 733
733 if (ath9k_ps_enable) 734 if (ath9k_ps_enable)
734 ieee80211_hw_set(hw, SUPPORTS_PS); 735 ieee80211_hw_set(hw, SUPPORTS_PS);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index df8a1ecb9924..232dcbb83311 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -181,6 +181,7 @@ enum brcmf_netif_stop_reason {
181 * @netif_stop_lock: spinlock for update netif_stop from multiple sources. 181 * @netif_stop_lock: spinlock for update netif_stop from multiple sources.
182 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. 182 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
183 * @pend_8021x_wait: used for signalling change in count. 183 * @pend_8021x_wait: used for signalling change in count.
184 * @fwil_fwerr: flag indicating fwil layer should return firmware error codes.
184 */ 185 */
185struct brcmf_if { 186struct brcmf_if {
186 struct brcmf_pub *drvr; 187 struct brcmf_pub *drvr;
@@ -198,6 +199,7 @@ struct brcmf_if {
198 wait_queue_head_t pend_8021x_wait; 199 wait_queue_head_t pend_8021x_wait;
199 struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES]; 200 struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES];
200 u8 ipv6addr_idx; 201 u8 ipv6addr_idx;
202 bool fwil_fwerr;
201}; 203};
202 204
203int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp); 205int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 47de35a33853..bede7b7fd996 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -104,6 +104,9 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
104 u32 data; 104 u32 data;
105 int err; 105 int err;
106 106
107 /* we need to know firmware error */
108 ifp->fwil_fwerr = true;
109
107 err = brcmf_fil_iovar_int_get(ifp, name, &data); 110 err = brcmf_fil_iovar_int_get(ifp, name, &data);
108 if (err == 0) { 111 if (err == 0) {
109 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); 112 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
@@ -112,6 +115,8 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
112 brcmf_dbg(TRACE, "%s feature check failed: %d\n", 115 brcmf_dbg(TRACE, "%s feature check failed: %d\n",
113 brcmf_feat_names[id], err); 116 brcmf_feat_names[id], err);
114 } 117 }
118
119 ifp->fwil_fwerr = false;
115} 120}
116 121
117static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, 122static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
@@ -120,6 +125,9 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
120{ 125{
121 int err; 126 int err;
122 127
128 /* we need to know firmware error */
129 ifp->fwil_fwerr = true;
130
123 err = brcmf_fil_iovar_data_set(ifp, name, data, len); 131 err = brcmf_fil_iovar_data_set(ifp, name, data, len);
124 if (err != -BRCMF_FW_UNSUPPORTED) { 132 if (err != -BRCMF_FW_UNSUPPORTED) {
125 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); 133 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
@@ -128,6 +136,8 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
 		brcmf_dbg(TRACE, "%s feature check failed: %d\n",
 			  brcmf_feat_names[id], err);
 	}
+
+	ifp->fwil_fwerr = false;
 }
 
 #define MAX_CAPS_BUFFER_SIZE	512
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index f2cfdd3b2bf1..fc5751116d99 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -131,6 +131,9 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
 			  brcmf_fil_get_errstr((u32)(-fwerr)), fwerr);
 		err = -EBADE;
 	}
+	if (ifp->fwil_fwerr)
+		return fwerr;
+
 	return err;
 }
 
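The three brcmfmac hunks above cooperate: feature probing sets ifp->fwil_fwerr around a firmware call so the fwil layer returns the raw firmware error (such as -BRCMF_FW_UNSUPPORTED) instead of the generic -EBADE. A minimal sketch of the pattern; brcmf_query_feature() is a hypothetical wrapper, not a function from the diff:

/* Sketch only: shows the set-flag/call/restore pattern used by
 * brcmf_feat_iovar_int_get() above. brcmf_query_feature() is hypothetical.
 */
static int brcmf_query_feature(struct brcmf_if *ifp, const char *name)
{
	u32 data;
	int err;

	ifp->fwil_fwerr = true;	/* ask fwil for raw firmware error codes */
	err = brcmf_fil_iovar_int_get(ifp, name, &data);
	ifp->fwil_fwerr = false;

	return err;	/* -BRCMF_FW_UNSUPPORTED now reaches the caller */
}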
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 2ee54133efa1..82064e909784 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
  * @dev_addr: optional device address.
  *
  * P2P needs mac addresses for P2P device and interface. If no device
- * address it specified, these are derived from the primary net device, ie.
- * the permanent ethernet address of the device.
+ * address it specified, these are derived from a random ethernet
+ * address.
  */
 static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
 {
-	struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
-	bool local_admin = false;
+	bool random_addr = false;
 
-	if (!dev_addr || is_zero_ether_addr(dev_addr)) {
-		dev_addr = pri_ifp->mac_addr;
-		local_admin = true;
-	}
+	if (!dev_addr || is_zero_ether_addr(dev_addr))
+		random_addr = true;
 
-	/* Generate the P2P Device Address. This consists of the device's
-	 * primary MAC address with the locally administered bit set.
+	/* Generate the P2P Device Address obtaining a random ethernet
+	 * address with the locally administered bit set.
 	 */
-	memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
-	if (local_admin)
-		p2p->dev_addr[0] |= 0x02;
+	if (random_addr)
+		eth_random_addr(p2p->dev_addr);
+	else
+		memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
 
 	/* Generate the P2P Interface Address. If the discovery and connection
 	 * BSSCFGs need to simultaneously co-exist, then this address must be
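The p2p.c change above stops deriving the P2P device address from the primary interface and uses a random one instead. As a rough standalone sketch (not driver code), eth_random_addr() already yields a locally administered, unicast address, so the old manual `|= 0x02` bit-set is no longer needed:

#include <linux/etherdevice.h>

/* Illustrative helper, assuming the same fallback rule as the diff. */
static void example_p2p_dev_addr(const u8 *dev_addr, u8 out[ETH_ALEN])
{
	if (!dev_addr || is_zero_ether_addr(dev_addr))
		eth_random_addr(out);	/* random, locally administered, unicast */
	else
		memcpy(out, dev_addr, ETH_ALEN);
}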
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index c5f2ddf9b0fe..e5a2fc738ac3 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -91,7 +91,6 @@ config IWLWIFI_BCAST_FILTERING
 config IWLWIFI_PCIE_RTPM
 	bool "Enable runtime power management mode for PCIe devices"
 	depends on IWLMVM && PM && EXPERT
-	default false
 	help
 	  Say Y here to enable runtime power management for PCIe
 	  devices. If enabled, the device will go into low power mode
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 3721a3ed358b..f824bebceb06 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -211,7 +211,7 @@ enum {
  * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
  * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
  * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
- * @T2_V2_START_IMMEDIATELY: start time event immediately
+ * @TE_V2_START_IMMEDIATELY: start time event immediately
  * @TE_V2_DEP_OTHER: depends on another time event
  * @TE_V2_DEP_TSF: depends on a specific time
  * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC
@@ -230,7 +230,7 @@ enum iwl_time_event_policy {
 	TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
 	TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
 	TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
-	T2_V2_START_IMMEDIATELY = BIT(11),
+	TE_V2_START_IMMEDIATELY = BIT(11),
 
 	/* placement characteristics */
 	TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 67aefc8fc9ac..7bd704a3e640 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -8,6 +8,7 @@
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -942,7 +944,6 @@ dump_trans_data:
 
 out:
 	iwl_fw_free_dump_desc(fwrt);
-	fwrt->dump.trig = NULL;
 	clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
 	IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
 }
@@ -1112,6 +1113,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
 	    fwrt->ops->dump_start(fwrt->ops_ctx))
 		return;
 
+	if (fwrt->ops && fwrt->ops->fw_running &&
+	    !fwrt->ops->fw_running(fwrt->ops_ctx)) {
+		IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
+		iwl_fw_free_dump_desc(fwrt);
+		clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+		goto out;
+	}
+
 	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
 		/* stop recording */
 		iwl_fw_dbg_stop_recording(fwrt);
@@ -1145,7 +1154,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
 			iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
 		}
 	}
-
+out:
 	if (fwrt->ops && fwrt->ops->dump_end)
 		fwrt->ops->dump_end(fwrt->ops_ctx);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 223fb77a3aa9..72259bff9922 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -8,6 +8,7 @@
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
 	if (fwrt->dump.desc != &iwl_dump_desc_assert)
 		kfree(fwrt->dump.desc);
 	fwrt->dump.desc = NULL;
+	fwrt->dump.trig = NULL;
 }
 
 void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
index e57ff92a68ae..3da468d2cc92 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -75,6 +75,20 @@ static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
 	cancel_delayed_work_sync(&fwrt->timestamp.wk);
 }
 
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
+{
+	cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
+{
+	if (!fwrt->timestamp.delay)
+		return;
+
+	schedule_delayed_work(&fwrt->timestamp.wk,
+			      round_jiffies_relative(fwrt->timestamp.delay));
+}
+
 #else
 static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
 					  struct dentry *dbgfs_dir)
@@ -84,4 +98,8 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
 
 static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
 
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
+
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
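The debugfs.h hunks follow the usual kernel pattern for optional features: real inline helpers under CONFIG_IWLWIFI_DEBUGFS, and empty static inline stubs otherwise, so callers such as iwl_fw_runtime_suspend() never need an #ifdef of their own. A minimal sketch of the idiom with generic, hypothetical names:

#ifdef CONFIG_EXAMPLE_FEATURE
void example_do_work(struct example_ctx *ctx);	/* real implementation */
#else
/* Stub compiles away entirely when the feature is disabled. */
static inline void example_do_work(struct example_ctx *ctx) {}
#endif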
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index c39fe84bb4c4..2efac307909e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -77,8 +77,14 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
 }
 IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
 
-void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt)
+void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
 {
-	iwl_fw_cancel_timestamp(fwrt);
+	iwl_fw_suspend_timestamp(fwrt);
 }
-IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit);
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
+
+void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
+{
+	iwl_fw_resume_timestamp(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index e25c049f980f..3fb940ebd74a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -68,6 +70,7 @@
 struct iwl_fw_runtime_ops {
 	int (*dump_start)(void *ctx);
 	void (*dump_end)(void *ctx);
+	bool (*fw_running)(void *ctx);
 };
 
 #define MAX_NUM_LMAC 2
@@ -150,6 +153,10 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
 
 void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
 
+void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
+
+void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt);
+
 static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
 					    enum iwl_ucode_type cur_fw_img)
 {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 0e6cf39285f4..2efe9b099556 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1098,6 +1098,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 	/* make sure the d0i3 exit work is not pending */
 	flush_work(&mvm->d0i3_exit_work);
 
+	iwl_fw_runtime_suspend(&mvm->fwrt);
+
 	ret = iwl_trans_suspend(trans);
 	if (ret)
 		return ret;
@@ -2012,6 +2014,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
 
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
 
+	iwl_fw_runtime_resume(&mvm->fwrt);
+
 	return ret;
 }
 
@@ -2038,6 +2042,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
 
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
 
+	iwl_fw_runtime_suspend(&mvm->fwrt);
+
 	/* start pseudo D3 */
 	rtnl_lock();
 	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
@@ -2098,6 +2104,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 	__iwl_mvm_resume(mvm, true);
 	rtnl_unlock();
 
+	iwl_fw_runtime_resume(&mvm->fwrt);
+
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
 
 	iwl_abort_notification_waits(&mvm->notif_wait);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a7892c1254a2..9c436d8d001d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1281,9 +1283,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
 {
 	int ret;
 
-	if (!iwl_mvm_firmware_running(mvm))
-		return -EIO;
-
 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
 	if (ret)
 		return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 2f22e14e00fe..8ba16fc24e3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	}
 
 	/* Allocate the CAB queue for softAP and GO interfaces */
-	if (vif->type == NL80211_IFTYPE_AP) {
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC) {
 		/*
 		 * For TVQM this will be overwritten later with the FW assigned
 		 * queue value (when queue is enabled).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 8aed40a8bc38..ebf511150f4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -2106,15 +2107,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 	if (ret)
 		goto out_remove;
 
-	ret = iwl_mvm_add_mcast_sta(mvm, vif);
-	if (ret)
-		goto out_unbind;
-
-	/* Send the bcast station. At this stage the TBTT and DTIM time events
-	 * are added and applied to the scheduler */
-	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
-	if (ret)
-		goto out_rm_mcast;
+	/*
+	 * This is not very nice, but the simplest:
+	 * For older FWs adding the mcast sta before the bcast station may
+	 * cause assert 0x2b00.
+	 * This is fixed in later FW so make the order of removal depend on
+	 * the TLV
+	 */
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+		ret = iwl_mvm_add_mcast_sta(mvm, vif);
+		if (ret)
+			goto out_unbind;
+		/*
+		 * Send the bcast station. At this stage the TBTT and DTIM time
+		 * events are added and applied to the scheduler
+		 */
+		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+		if (ret) {
+			iwl_mvm_rm_mcast_sta(mvm, vif);
+			goto out_unbind;
+		}
+	} else {
+		/*
+		 * Send the bcast station. At this stage the TBTT and DTIM time
+		 * events are added and applied to the scheduler
+		 */
+		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+		if (ret)
+			goto out_unbind;
+		ret = iwl_mvm_add_mcast_sta(mvm, vif);
+		if (ret) {
+			iwl_mvm_send_rm_bcast_sta(mvm, vif);
+			goto out_unbind;
+		}
+	}
 
 	/* must be set before quota calculations */
 	mvmvif->ap_ibss_active = true;
@@ -2144,7 +2170,6 @@ out_quota_failed:
 	iwl_mvm_power_update_mac(mvm);
 	mvmvif->ap_ibss_active = false;
 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
-out_rm_mcast:
 	iwl_mvm_rm_mcast_sta(mvm, vif);
 out_unbind:
 	iwl_mvm_binding_remove_vif(mvm, vif);
@@ -2682,6 +2707,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 
 		/* enable beacon filtering */
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+
+		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+				     false);
+
 		ret = 0;
 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
 		   new_state == IEEE80211_STA_ASSOC) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2d28e0804218..89ff02d7c876 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -90,6 +90,7 @@
 #include "fw/runtime.h"
 #include "fw/dbg.h"
 #include "fw/acpi.h"
+#include "fw/debugfs.h"
 
 #define IWL_MVM_MAX_ADDRESSES		5
 /* RSSI offset for WkP */
@@ -1783,6 +1784,7 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 
 static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 {
+	iwl_fw_cancel_timestamp(&mvm->fwrt);
 	iwl_free_fw_paging(&mvm->fwrt);
 	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
 	iwl_fw_dump_conf_clear(&mvm->fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5d525a0023dc..ab7fb5aad984 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -552,9 +554,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
 	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
 }
 
+static bool iwl_mvm_fwrt_fw_running(void *ctx)
+{
+	return iwl_mvm_firmware_running(ctx);
+}
+
 static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
 	.dump_start = iwl_mvm_fwrt_dump_start,
 	.dump_end = iwl_mvm_fwrt_dump_end,
+	.fw_running = iwl_mvm_fwrt_fw_running,
 };
 
 static struct iwl_op_mode *
@@ -802,7 +810,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	iwl_mvm_leds_exit(mvm);
 	iwl_mvm_thermal_exit(mvm);
  out_free:
-	iwl_fw_runtime_exit(&mvm->fwrt);
 	iwl_fw_flush_dump(&mvm->fwrt);
 
 	if (iwlmvm_mod_params.init_dbg)
@@ -843,7 +850,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
 	kfree(mvm->d3_resume_sram);
 #endif
-	iwl_fw_runtime_exit(&mvm->fwrt);
 	iwl_trans_op_mode_leave(mvm->trans);
 
 	iwl_phy_db_free(mvm->phy_db);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 60abb0084ee5..47f4c7a1d80d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2684,7 +2684,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
 				struct ieee80211_sta *sta,
 				struct iwl_lq_sta *lq_sta,
 				enum nl80211_band band,
-				struct rs_rate *rate)
+				struct rs_rate *rate,
+				bool init)
 {
 	int i, nentries;
 	unsigned long active_rate;
@@ -2738,14 +2739,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
 	 */
 	if (sta->vht_cap.vht_supported &&
 	    best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
-		switch (sta->bandwidth) {
-		case IEEE80211_STA_RX_BW_160:
-		case IEEE80211_STA_RX_BW_80:
-		case IEEE80211_STA_RX_BW_40:
+		/*
+		 * In AP mode, when a new station associates, rs is initialized
+		 * immediately upon association completion, before the phy
+		 * context is updated with the association parameters, so the
+		 * sta bandwidth might be wider than the phy context allows.
+		 * To avoid this issue, always initialize rs with 20mhz
+		 * bandwidth rate, and after authorization, when the phy context
+		 * is already up-to-date, re-init rs with the correct bw.
+		 */
+		u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
+
+		switch (bw) {
+		case RATE_MCS_CHAN_WIDTH_40:
+		case RATE_MCS_CHAN_WIDTH_80:
+		case RATE_MCS_CHAN_WIDTH_160:
 			initial_rates = rs_optimal_rates_vht;
 			nentries = ARRAY_SIZE(rs_optimal_rates_vht);
 			break;
-		case IEEE80211_STA_RX_BW_20:
+		case RATE_MCS_CHAN_WIDTH_20:
 			initial_rates = rs_optimal_rates_vht_20mhz;
 			nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
 			break;
@@ -2756,7 +2768,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
 
 		active_rate = lq_sta->active_siso_rate;
 		rate->type = LQ_VHT_SISO;
-		rate->bw = rs_bw_from_sta_bw(sta);
+		rate->bw = bw;
 	} else if (sta->ht_cap.ht_supported &&
 		   best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
 		initial_rates = rs_optimal_rates_ht;
@@ -2839,7 +2851,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
 	tbl = &(lq_sta->lq_info[active_tbl]);
 	rate = &tbl->rate;
 
-	rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
+	rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init);
 	rs_init_optimal_rate(mvm, sta, lq_sta);
 
 	WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a3f7c1bf3cc8..580de5851fc7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
 	struct iwl_mvm_key_pn *ptk_pn;
+	int res;
 	u8 tid, keyidx;
 	u8 pn[IEEE80211_CCMP_PN_LEN];
 	u8 *extiv;
@@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
 	pn[4] = extiv[1];
 	pn[5] = extiv[0];
 
-	if (memcmp(pn, ptk_pn->q[queue].pn[tid],
-		   IEEE80211_CCMP_PN_LEN) <= 0)
+	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+	if (res < 0)
+		return -1;
+	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
 		return -1;
 
-	if (!(stats->flag & RX_FLAG_AMSDU_MORE))
-		memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
 	stats->flag |= RX_FLAG_PN_VALIDATED;
 
 	return 0;
@@ -314,28 +316,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
 }
 
 /*
- * returns true if a packet outside BA session is a duplicate and
- * should be dropped
+ * returns true if a packet is a duplicate and should be dropped.
+ * Updates AMSDU PN tracking info
  */
-static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
-				  struct ieee80211_rx_status *rx_status,
-				  struct ieee80211_hdr *hdr,
-				  struct iwl_rx_mpdu_desc *desc)
+static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
+			   struct ieee80211_rx_status *rx_status,
+			   struct ieee80211_hdr *hdr,
+			   struct iwl_rx_mpdu_desc *desc)
 {
 	struct iwl_mvm_sta *mvm_sta;
 	struct iwl_mvm_rxq_dup_data *dup_data;
-	u8 baid, tid, sub_frame_idx;
+	u8 tid, sub_frame_idx;
 
 	if (WARN_ON(IS_ERR_OR_NULL(sta)))
 		return false;
 
-	baid = (le32_to_cpu(desc->reorder_data) &
-		IWL_RX_MPDU_REORDER_BAID_MASK) >>
-		IWL_RX_MPDU_REORDER_BAID_SHIFT;
-
-	if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
-		return false;
-
 	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	dup_data = &mvm_sta->dup_data[queue];
 
@@ -365,6 +360,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
 		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
 		return true;
 
+	/* Allow same PN as the first subframe for following sub frames */
+	if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+	    sub_frame_idx > dup_data->last_sub_frame[tid] &&
+	    desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
+		rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
+
 	dup_data->last_seq[tid] = hdr->seq_ctrl;
 	dup_data->last_sub_frame[tid] = sub_frame_idx;
 
@@ -971,7 +972,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 	if (ieee80211_is_data(hdr->frame_control))
 		iwl_mvm_rx_csum(sta, skb, desc);
 
-	if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
+	if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
 		kfree_skb(skb);
 		goto out;
 	}
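Taken together, the rxmq.c hunks split the old `memcmp(...) <= 0` test so that an identical PN is accepted only when the duplicate detector has flagged the frame as a further subframe of the same A-MSDU (RX_FLAG_ALLOW_SAME_PN). A compact sketch of the resulting replay check; the function and buffer names below are illustrative, not driver API:

/* Simplified replay check: pn is the received packet number, last_pn the
 * newest PN accepted so far on this queue/TID. Returns 0 if accepted.
 */
static int example_check_pn(const u8 *pn, u8 *last_pn, bool allow_same_pn)
{
	int res = memcmp(pn, last_pn, IEEE80211_CCMP_PN_LEN);

	if (res < 0)			/* strictly older PN: replay */
		return -1;
	if (!res && !allow_same_pn)	/* equal PN only OK within one A-MSDU */
		return -1;

	memcpy(last_pn, pn, IEEE80211_CCMP_PN_LEN);	/* advance tracker */
	return 0;
}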
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 6b2674e02606..630e23cb0ffb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2039,7 +2039,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	struct iwl_trans_txq_scd_cfg cfg = {
 		.fifo = IWL_MVM_TX_FIFO_MCAST,
 		.sta_id = msta->sta_id,
-		.tid = IWL_MAX_TID_COUNT,
+		.tid = 0,
 		.aggregate = false,
 		.frame_limit = IWL_FRAME_LIMIT,
 	};
@@ -2053,6 +2053,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		return -ENOTSUPP;
 
 	/*
+	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+	 * invalid, so make sure we use the queue we want.
+	 * Note that this is done here as we want to avoid making DQA
+	 * changes in mac80211 layer.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC) {
+		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+		mvmvif->cab_queue = vif->cab_queue;
+	}
+
+	/*
 	 * While in previous FWs we had to exclude cab queue from TFD queue
 	 * mask, now it is needed as any other queue.
 	 */
@@ -2079,24 +2090,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	if (iwl_mvm_has_new_tx_api(mvm)) {
 		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
 						    msta->sta_id,
-						    IWL_MAX_TID_COUNT,
+						    0,
 						    timeout);
 		mvmvif->cab_queue = queue;
 	} else if (!fw_has_api(&mvm->fw->ucode_capa,
-			       IWL_UCODE_TLV_API_STA_TYPE)) {
-		/*
-		 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
-		 * invalid, so make sure we use the queue we want.
-		 * Note that this is done here as we want to avoid making DQA
-		 * changes in mac80211 layer.
-		 */
-		if (vif->type == NL80211_IFTYPE_ADHOC) {
-			vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
-			mvmvif->cab_queue = vif->cab_queue;
-		}
+			       IWL_UCODE_TLV_API_STA_TYPE))
 		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
 				   &cfg, timeout);
-	}
 
 	return 0;
 }
@@ -2115,7 +2115,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
 
 	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
-			    IWL_MAX_TID_COUNT, 0);
+			    0, 0);
 
 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
 	if (ret)
@@ -3170,8 +3170,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
 	int ret, size;
 	u32 status;
 
+	/* This is a valid situation for GTK removal */
 	if (sta_id == IWL_MVM_INVALID_STA)
-		return -EINVAL;
+		return 0;
 
 	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
 				STA_KEY_FLG_KEYID_MSK);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 200ab50ec86b..acb217e666db 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -616,7 +616,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 	time_cmd.repeat = 1;
 	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
 				      TE_V2_NOTIF_HOST_EVENT_END |
-				      T2_V2_START_IMMEDIATELY);
+				      TE_V2_START_IMMEDIATELY);
 
 	if (!wait_for_notif) {
 		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
@@ -803,7 +803,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	time_cmd.repeat = 1;
 	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
 				      TE_V2_NOTIF_HOST_EVENT_END |
-				      T2_V2_START_IMMEDIATELY);
+				      TE_V2_START_IMMEDIATELY);
 
 	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
@@ -913,6 +913,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
 	time_cmd.interval = cpu_to_le32(1);
 	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
 				      TE_V2_ABSENCE);
+	if (!apply_time)
+		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
 
 	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dda77b327c98..af6dfceab6b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 {
 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
 	u8 *crypto_hdr = skb_frag->data + hdrlen;
+	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
 	u64 pn;
 
 	switch (keyconf->cipher) {
 	case WLAN_CIPHER_SUITE_CCMP:
-	case WLAN_CIPHER_SUITE_CCMP_256:
 		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
 		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 		break;
@@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 		break;
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
+		type = TX_CMD_SEC_GCMP;
+		/* Fall through */
+	case WLAN_CIPHER_SUITE_CCMP_256:
 		/* TODO: Taking the key from the table might introduce a race
 		 * when PTK rekeying is done, having an old packets with a PN
 		 * based on the old key but the message encrypted with a new
 		 * one.
 		 * Need to handle this.
 		 */
-		tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE;
+		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
 		tx_cmd->key[0] = keyconf->hw_key_idx;
 		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
 		break;
@@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 	if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
 	    info.control.vif->type == NL80211_IFTYPE_AP ||
 	    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
-		sta_id = mvmvif->bcast_sta.sta_id;
+		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+			sta_id = mvmvif->bcast_sta.sta_id;
+		else
+			sta_id = mvmvif->mcast_sta.sta_id;
+
 		queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
 						   hdr->frame_control);
 		if (queue < 0)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 6d0a907d5ba5..fabae0f60683 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -147,7 +147,7 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
 	/* Sanity check on number of chunks */
 	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
 
-	if (num_tbs >= trans_pcie->max_tbs) {
+	if (num_tbs > trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
 		return;
 	}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 3f85713c41dc..1a566287993d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -378,7 +378,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 	/* Sanity check on number of chunks */
 	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
 
-	if (num_tbs >= trans_pcie->max_tbs) {
+	if (num_tbs > trans_pcie->max_tbs) {
 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
 		/* @todo issue fatal error, it is quite serious situation */
 		return;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7b6c3640a94f..100cf42db65d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2742,6 +2742,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 	mutex_init(&data->mutex);
 
 	data->netgroup = hwsim_net_get_netgroup(net);
+	data->wmediumd = hwsim_net_get_wmediumd(net);
 
 	/* Enable frame retransmissions for lossy channels */
 	hw->max_rates = 4;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index f9ccd13c79f9..e7bbbc95cdb1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1125,7 +1125,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
 
 	/* Configuration Space offset 0x70f BIT7 is used to control L0S */
 	tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f);
-	_rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7));
+	_rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) |
+			     ASPM_L1_LATENCY << 3);
 
 	/* Configuration Space offset 0x719 Bit3 is for L1
 	 * BIT4 is for clock request
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 345acca576b3..1bd7b3734751 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -278,8 +278,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 	disk->queue		= q;
 	disk->flags		= GENHD_FL_EXT_DEVT;
 	nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
-	set_capacity(disk, 0);
-	device_add_disk(dev, disk);
 
 	if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
 		return -ENOMEM;
@@ -292,6 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
 	}
 
 	set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
+	device_add_disk(dev, disk);
 	revalidate_disk(disk);
 	return 0;
 }
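Both nvdimm hunks (blk.c here and btt.c below) reorder the same sequence: the capacity is now set before device_add_disk(), so the uevent emitted at disk registration already reports the real size instead of 0. A rough sketch of the intended ordering, with error handling omitted and the helper name invented for illustration:

/* Sketch of the ordering the nvdimm hunks establish. */
static void example_register_disk(struct device *dev, struct gendisk *disk,
				  sector_t sectors)
{
	set_capacity(disk, sectors);	/* size known before registration */
	device_add_disk(dev, disk);	/* uevent now carries the real size */
	revalidate_disk(disk);
}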
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 2ef544f10ec8..4b95ac513de2 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1545,8 +1545,6 @@ static int btt_blk_init(struct btt *btt)
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
 	btt->btt_queue->queuedata = btt;
 
-	set_capacity(btt->btt_disk, 0);
-	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
 	if (btt_meta_size(btt)) {
 		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
 
@@ -1558,6 +1556,7 @@ static int btt_blk_init(struct btt *btt)
 		}
 	}
 	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
+	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
 	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
 	revalidate_disk(btt->btt_disk);
 
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index f5c4e8c6e29d..2f4d18752c97 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -304,7 +304,7 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {
 struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
 		struct nd_namespace_common *ndns)
 {
-	struct device *dev = &nd_pfn->dev;
+	struct device *dev;
 
 	if (!nd_pfn)
 		return NULL;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e6d01911e092..1593e1806b16 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -532,11 +532,13 @@ static ssize_t persistence_domain_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct nd_region *nd_region = to_nd_region(dev);
-	unsigned long flags = nd_region->flags;
 
-	return sprintf(buf, "%s%s\n",
-			flags & BIT(ND_REGION_PERSIST_CACHE) ? "cpu_cache " : "",
-			flags & BIT(ND_REGION_PERSIST_MEMCTRL) ? "memory_controller " : "");
+	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
+		return sprintf(buf, "cpu_cache\n");
+	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
+		return sprintf(buf, "memory_controller\n");
+	else
+		return sprintf(buf, "\n");
 }
 static DEVICE_ATTR_RO(persistence_domain);
 
@@ -593,6 +595,13 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 			return 0;
 	}
 
+	if (a == &dev_attr_persistence_domain.attr) {
+		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
+					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
+			return 0;
+		return a->mode;
+	}
+
 	if (a != &dev_attr_set_cookie.attr
 			&& a != &dev_attr_available_size.attr)
 		return a->mode;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 817e5e2766da..7aeca5db7916 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3033,7 +3033,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 			ns->disk->disk_name);
 
 	nvme_mpath_add_disk(ns->head);
-	nvme_mpath_add_disk_links(ns);
 	return;
  out_unlink_ns:
 	mutex_lock(&ctrl->subsys->lock);
@@ -3053,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 		return;
 
 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-		nvme_mpath_remove_disk_links(ns);
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 				   &nvme_ns_id_attr_group);
 		if (ns->ndev)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index a1c58e35075e..8f0f34d06d46 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 				ret = -EINVAL;
 				goto out;
 			}
+			if (opts->discovery_nqn) {
+				pr_debug("Ignoring nr_io_queues value for discovery controller\n");
+				break;
+			}
+
 			opts->nr_io_queues = min_t(unsigned int,
 					num_online_cpus(), token);
 			break;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7f51f8414b97..1dc1387b7134 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
 			sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
 
 	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
-	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
 	/* Linux supports only Dynamic controllers */
 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
 	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 			sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
 	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
 	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
-	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
 
 	lsop->queue = queue;
 	lsreq->rqstaddr = conn_rqst;
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 		goto out_free_tag_set;
 	}
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_cleanup_blk_queue;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
 
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_free_io_queues;
 
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
 
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2632 nvme_fc_init_queue(ctrl, 0); 2632 nvme_fc_init_queue(ctrl, 0);
2633 2633
2634 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, 2634 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2635 NVME_AQ_BLK_MQ_DEPTH); 2635 NVME_AQ_DEPTH);
2636 if (ret) 2636 if (ret)
2637 goto out_free_queue; 2637 goto out_free_queue;
2638 2638
2639 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], 2639 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
2640 NVME_AQ_BLK_MQ_DEPTH, 2640 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
2641 (NVME_AQ_BLK_MQ_DEPTH / 4));
2642 if (ret) 2641 if (ret)
2643 goto out_delete_hw_queue; 2642 goto out_delete_hw_queue;
2644 2643
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2666 } 2665 }
2667 2666
2668 ctrl->ctrl.sqsize = 2667 ctrl->ctrl.sqsize =
2669 min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize); 2668 min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
2670 2669
2671 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); 2670 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
2672 if (ret) 2671 if (ret)
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2699 opts->queue_size = ctrl->ctrl.maxcmd; 2698 opts->queue_size = ctrl->ctrl.maxcmd;
2700 } 2699 }
2701 2700
2701 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
2702 /* warn if sqsize is lower than queue_size */
2703 dev_warn(ctrl->ctrl.device,
2704 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2705 opts->queue_size, ctrl->ctrl.sqsize + 1);
2706 opts->queue_size = ctrl->ctrl.sqsize + 1;
2707 }
2708
2702 ret = nvme_fc_init_aen_ops(ctrl); 2709 ret = nvme_fc_init_aen_ops(ctrl);
2703 if (ret) 2710 if (ret)
2704 goto out_term_aen_ops; 2711 goto out_term_aen_ops;
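Note on the sqsize arithmetic above: NVMe's SQSIZE field is zero-based, so a queue
holding depth entries is advertised as depth - 1, and a controller whose sqsize is
N accepts N + 1 outstanding commands. That is why the connect requests now carry
qsize - 1 while the create/connect paths pass ctrl->ctrl.sqsize + 1 instead of the
raw opts->queue_size. A minimal sketch of the convention (these helper names are
illustrative only, not functions from drivers/nvme):

	/* Illustrative helpers only -- not part of the nvme driver. */
	static inline u16 nvme_sqsize_from_depth(u16 depth)
	{
		return depth - 1;		/* SQSIZE is zero-based */
	}

	static inline u32 nvme_depth_from_sqsize(u16 sqsize)
	{
		return (u32)sqsize + 1;		/* usable queue entries */
	}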
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index b7e5c6db4d92..060f69e03427 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -210,25 +210,6 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
 	mutex_unlock(&head->subsys->lock);
 }

-void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-	struct kobject *slave_disk_kobj, *holder_disk_kobj;
-
-	if (!ns->head->disk)
-		return;
-
-	slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
-	if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
-			      kobject_name(slave_disk_kobj)))
-		return;
-
-	holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
-	if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
-			      kobject_name(holder_disk_kobj)))
-		sysfs_remove_link(ns->head->disk->slave_dir,
-				  kobject_name(slave_disk_kobj));
-}
-
 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
@@ -243,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 	blk_cleanup_queue(head->disk->queue);
 	put_disk(head->disk);
 }
-
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-	if (!ns->head->disk)
-		return;
-
-	sysfs_remove_link(ns->disk->part0.holder_dir,
-			  kobject_name(&disk_to_dev(ns->head->disk)->kobj));
-	sysfs_remove_link(ns->head->disk->slave_dir,
-			  kobject_name(&disk_to_dev(ns->disk)->kobj));
-}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 0521e4707d1c..d733b14ede9d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -410,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
 void nvme_mpath_add_disk(struct nvme_ns_head *head);
-void nvme_mpath_add_disk_links(struct nvme_ns *ns);
 void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-void nvme_mpath_remove_disk_links(struct nvme_ns *ns);

 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
@@ -454,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 }
-static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
-{
-}
-static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
-{
-}
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 }
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5933a5c732e8..b6f43b738f03 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
 	if (!(csts & NVME_CSTS_CFS) && !nssro)
 		return false;

-	/* If PCI error recovery process is happening, we cannot reset or
-	 * the recovery mechanism will surely fail.
-	 */
-	if (pci_channel_offline(to_pci_dev(dev->dev)))
-		return false;
-
 	return true;
 }

@@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 	struct nvme_command cmd;
 	u32 csts = readl(dev->bar + NVME_REG_CSTS);

+	/* If PCI error recovery process is happening, we cannot reset or
+	 * the recovery mechanism will surely fail.
+	 */
+	mb();
+	if (pci_channel_offline(to_pci_dev(dev->dev)))
+		return BLK_EH_RESET_TIMER;
+
 	/*
 	 * Reset immediately if the controller is failed
 	 */
@@ -1913,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	int result, nr_io_queues;
 	unsigned long size;

-	nr_io_queues = num_present_cpus();
+	nr_io_queues = num_possible_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
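The relocated pci_channel_offline() test changes behaviour as well as placement:
instead of vetoing the reset decision in nvme_should_reset(), the timeout handler
now backs off entirely while AER/EEH error recovery owns the device. A sketch of
the resulting shape, with hypothetical names standing in for the driver's
internals:

	/* Hypothetical driver, shown only to illustrate the timeout pattern. */
	static enum blk_eh_timer_return foo_timeout(struct request *req, bool reserved)
	{
		struct foo_dev *dev = req->q->queuedata;

		mb();	/* order the channel-state check against earlier MMIO */
		if (pci_channel_offline(to_pci_dev(dev->dev)))
			return BLK_EH_RESET_TIMER;	/* re-arm, let recovery finish */

		/* ... normal timeout handling: abort, controller reset, ... */
		return BLK_EH_HANDLED;
	}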
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 8de2d5c69b1d..dc9303abda42 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -613,7 +613,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 	/* setup bus numbers */
 	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
 	val &= 0xff000000;
-	val |= 0x00010100;
+	val |= 0x00ff0100;
 	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

 	/* setup command register */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8b14bd326d4a..46d47bd6ca1f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3908,6 +3908,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
 			 quirk_dma_func1_alias);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
 			 quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
+			 quirk_dma_func1_alias);
 /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
 		PCI_DEVICE_ID_JMICRON_JMB388_ESD,
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 0c2ed11c0603..f63db346c219 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -638,7 +638,7 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
 		if (irq_is_percpu_devid(irq))
 			disable_percpu_irq(irq);
 		else
-			disable_irq(irq);
+			disable_irq_nosync(irq);
 	}

 	per_cpu(cpu_armpmu, cpu) = NULL;
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index c5ff4525edef..c5493ea51282 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -675,3 +675,8 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
+
+MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>");
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 6dec6ab13300..d8599736a41a 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -423,7 +423,7 @@ static int chromeos_laptop_probe(struct platform_device *pdev)
 	return ret;
 }

-static const struct chromeos_laptop samsung_series_5_550 = {
+static struct chromeos_laptop samsung_series_5_550 = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
@@ -432,14 +432,14 @@ static const struct chromeos_laptop samsung_series_5_550 = {
 	},
 };

-static const struct chromeos_laptop samsung_series_5 = {
+static struct chromeos_laptop samsung_series_5 = {
 	.i2c_peripherals = {
 		/* Light Sensor. */
 		{ .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
 	},
 };

-static const struct chromeos_laptop chromebook_pixel = {
+static struct chromeos_laptop chromebook_pixel = {
 	.i2c_peripherals = {
 		/* Touch Screen. */
 		{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
@@ -450,14 +450,14 @@ static const struct chromeos_laptop chromebook_pixel = {
 	},
 };

-static const struct chromeos_laptop hp_chromebook_14 = {
+static struct chromeos_laptop hp_chromebook_14 = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
 	},
 };

-static const struct chromeos_laptop dell_chromebook_11 = {
+static struct chromeos_laptop dell_chromebook_11 = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
@@ -466,28 +466,28 @@ static const struct chromeos_laptop dell_chromebook_11 = {
 	},
 };

-static const struct chromeos_laptop toshiba_cb35 = {
+static struct chromeos_laptop toshiba_cb35 = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
 	},
 };

-static const struct chromeos_laptop acer_c7_chromebook = {
+static struct chromeos_laptop acer_c7_chromebook = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
 	},
 };

-static const struct chromeos_laptop acer_ac700 = {
+static struct chromeos_laptop acer_ac700 = {
 	.i2c_peripherals = {
 		/* Light Sensor. */
 		{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
 	},
 };

-static const struct chromeos_laptop acer_c720 = {
+static struct chromeos_laptop acer_c720 = {
 	.i2c_peripherals = {
 		/* Touchscreen. */
 		{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
@@ -500,14 +500,14 @@ static const struct chromeos_laptop acer_c720 = {
 	},
 };

-static const struct chromeos_laptop hp_pavilion_14_chromebook = {
+static struct chromeos_laptop hp_pavilion_14_chromebook = {
 	.i2c_peripherals = {
 		/* Touchpad. */
 		{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
 	},
 };

-static const struct chromeos_laptop cr48 = {
+static struct chromeos_laptop cr48 = {
 	.i2c_peripherals = {
 		/* Light Sensor. */
 		{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 9a8f96465cdc..51ebc5a6053f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -105,31 +105,45 @@ config ASUS_LAPTOP
 
 	  If you have an ACPI-compatible ASUS laptop, say Y or M here.
 
+#
+# The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those
+# backends are selected. The "depends" line prevents a configuration
+# where DELL_SMBIOS=y while either of those dependencies =m.
+#
 config DELL_SMBIOS
-	tristate
+	tristate "Dell SMBIOS driver"
+	depends on DCDBAS || DCDBAS=n
+	depends on ACPI_WMI || ACPI_WMI=n
+	---help---
+	  This provides support for the Dell SMBIOS calling interface.
+	  If you have a Dell computer you should enable this option.
+
+	  Be sure to select at least one backend for it to work properly.
 
 config DELL_SMBIOS_WMI
-	tristate "Dell SMBIOS calling interface (WMI implementation)"
+	bool "Dell SMBIOS driver WMI backend"
+	default y
 	depends on ACPI_WMI
 	select DELL_WMI_DESCRIPTOR
-	select DELL_SMBIOS
+	depends on DELL_SMBIOS
 	---help---
 	  This provides an implementation for the Dell SMBIOS calling interface
 	  communicated over ACPI-WMI.
 
-	  If you have a Dell computer from >2007 you should say Y or M here.
+	  If you have a Dell computer from >2007 you should say Y here.
 	  If you aren't sure and this module doesn't work for your computer
 	  it just won't load.
 
 config DELL_SMBIOS_SMM
-	tristate "Dell SMBIOS calling interface (SMM implementation)"
+	bool "Dell SMBIOS driver SMM backend"
+	default y
 	depends on DCDBAS
-	select DELL_SMBIOS
+	depends on DELL_SMBIOS
 	---help---
 	  This provides an implementation for the Dell SMBIOS calling interface
 	  communicated over SMI/SMM.
 
-	  If you have a Dell computer from <=2017 you should say Y or M here.
+	  If you have a Dell computer from <=2017 you should say Y here.
 	  If you aren't sure and this module doesn't work for your computer
 	  it just won't load.
 
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index c388608ad2a3..2ba6cb795338 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -13,8 +13,9 @@ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
 obj-$(CONFIG_ACPI_CMPC)	+= classmate-laptop.o
 obj-$(CONFIG_COMPAL_LAPTOP)	+= compal-laptop.o
 obj-$(CONFIG_DELL_SMBIOS)	+= dell-smbios.o
-obj-$(CONFIG_DELL_SMBIOS_WMI)	+= dell-smbios-wmi.o
-obj-$(CONFIG_DELL_SMBIOS_SMM)	+= dell-smbios-smm.o
+dell-smbios-objs		:= dell-smbios-base.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_WMI)	+= dell-smbios-wmi.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_SMM)	+= dell-smbios-smm.o
 obj-$(CONFIG_DELL_LAPTOP)	+= dell-laptop.o
 obj-$(CONFIG_DELL_WMI)	+= dell-wmi.o
 obj-$(CONFIG_DELL_WMI_DESCRIPTOR)	+= dell-wmi-descriptor.o
diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios-base.c
index 8541cde4cb7d..2485c80a9fdd 100644
--- a/drivers/platform/x86/dell-smbios.c
+++ b/drivers/platform/x86/dell-smbios-base.c
@@ -36,7 +36,7 @@ static DEFINE_MUTEX(smbios_mutex);
 struct smbios_device {
 	struct list_head list;
 	struct device *device;
-	int (*call_fn)(struct calling_interface_buffer *);
+	int (*call_fn)(struct calling_interface_buffer *arg);
 };

 struct smbios_call {
@@ -352,8 +352,10 @@ static void __init parse_da_table(const struct dmi_header *dm)
 	struct calling_interface_structure *table =
 		container_of(dm, struct calling_interface_structure, header);

-	/* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
-	   6 bytes of entry */
+	/*
+	 * 4 bytes of table header, plus 7 bytes of Dell header
+	 * plus at least 6 bytes of entry
+	 */

 	if (dm->length < 17)
 		return;
@@ -554,7 +556,7 @@ static void free_group(struct platform_device *pdev)
 static int __init dell_smbios_init(void)
 {
 	const struct dmi_device *valid;
-	int ret;
+	int ret, wmi, smm;

 	valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL);
 	if (!valid) {
@@ -589,8 +591,24 @@ static int __init dell_smbios_init(void)
 	if (ret)
 		goto fail_create_group;

+	/* register backends */
+	wmi = init_dell_smbios_wmi();
+	if (wmi)
+		pr_debug("Failed to initialize WMI backend: %d\n", wmi);
+	smm = init_dell_smbios_smm();
+	if (smm)
+		pr_debug("Failed to initialize SMM backend: %d\n", smm);
+	if (wmi && smm) {
+		pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n",
+			wmi, smm);
+		goto fail_sysfs;
+	}
+
 	return 0;

+fail_sysfs:
+	free_group(platform_device);
+
 fail_create_group:
 	platform_device_del(platform_device);

@@ -607,6 +625,8 @@ fail_platform_driver:

 static void __exit dell_smbios_exit(void)
 {
+	exit_dell_smbios_wmi();
+	exit_dell_smbios_smm();
 	mutex_lock(&smbios_mutex);
 	if (platform_device) {
 		free_group(platform_device);
@@ -617,11 +637,12 @@ static void __exit dell_smbios_exit(void)
 	mutex_unlock(&smbios_mutex);
 }

-subsys_initcall(dell_smbios_init);
+module_init(dell_smbios_init);
 module_exit(dell_smbios_exit);

 MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
 MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
 MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
 MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
 MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
index 89f65c4651a0..e9e9da556318 100644
--- a/drivers/platform/x86/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell-smbios-smm.c
@@ -58,7 +58,7 @@ static const struct dmi_system_id dell_device_table[] __initconst = {
 };
 MODULE_DEVICE_TABLE(dmi, dell_device_table);

-static void __init parse_da_table(const struct dmi_header *dm)
+static void parse_da_table(const struct dmi_header *dm)
 {
 	struct calling_interface_structure *table =
 		container_of(dm, struct calling_interface_structure, header);
@@ -73,7 +73,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
 	da_command_code = table->cmdIOCode;
 }

-static void __init find_cmd_address(const struct dmi_header *dm, void *dummy)
+static void find_cmd_address(const struct dmi_header *dm, void *dummy)
 {
 	switch (dm->type) {
 	case 0xda: /* Calling interface */
@@ -128,7 +128,7 @@ static bool test_wsmt_enabled(void)
 	return false;
 }

-static int __init dell_smbios_smm_init(void)
+int init_dell_smbios_smm(void)
 {
 	int ret;
 	/*
@@ -176,7 +176,7 @@ fail_platform_device_alloc:
 	return ret;
 }

-static void __exit dell_smbios_smm_exit(void)
+void exit_dell_smbios_smm(void)
 {
 	if (platform_device) {
 		dell_smbios_unregister_device(&platform_device->dev);
@@ -184,13 +184,3 @@ static void __exit dell_smbios_smm_exit(void)
 		free_page((unsigned long)buffer);
 	}
 }
-
-subsys_initcall(dell_smbios_smm_init);
-module_exit(dell_smbios_smm_exit);
-
-MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
-MODULE_DESCRIPTION("Dell SMBIOS communications over SMI");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 609557aa5868..fbefedb1c172 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -228,7 +228,7 @@ static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
 	{ },
 };

-static void __init parse_b1_table(const struct dmi_header *dm)
+static void parse_b1_table(const struct dmi_header *dm)
 {
 	struct misc_bios_flags_structure *flags =
 		container_of(dm, struct misc_bios_flags_structure, header);
@@ -242,7 +242,7 @@ static void __init parse_b1_table(const struct dmi_header *dm)
 	wmi_supported = 1;
 }

-static void __init find_b1(const struct dmi_header *dm, void *dummy)
+static void find_b1(const struct dmi_header *dm, void *dummy)
 {
 	switch (dm->type) {
 	case 0xb1: /* misc bios flags */
@@ -261,7 +261,7 @@ static struct wmi_driver dell_smbios_wmi_driver = {
 	.filter_callback = dell_smbios_wmi_filter,
 };

-static int __init init_dell_smbios_wmi(void)
+int init_dell_smbios_wmi(void)
 {
 	dmi_walk(find_b1, NULL);

@@ -271,15 +271,9 @@ static int __init init_dell_smbios_wmi(void)
 	return wmi_driver_register(&dell_smbios_wmi_driver);
 }

-static void __exit exit_dell_smbios_wmi(void)
+void exit_dell_smbios_wmi(void)
 {
 	wmi_driver_unregister(&dell_smbios_wmi_driver);
 }

-module_init(init_dell_smbios_wmi);
-module_exit(exit_dell_smbios_wmi);
-
 MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID);
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
-MODULE_DESCRIPTION("Dell SMBIOS communications over WMI");
-MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h
index 138d478d9adc..d8adaf959740 100644
--- a/drivers/platform/x86/dell-smbios.h
+++ b/drivers/platform/x86/dell-smbios.h
@@ -75,4 +75,29 @@ int dell_laptop_register_notifier(struct notifier_block *nb);
 int dell_laptop_unregister_notifier(struct notifier_block *nb);
 void dell_laptop_call_notifier(unsigned long action, void *data);

-#endif
+/* for the supported backends */
+#ifdef CONFIG_DELL_SMBIOS_WMI
+int init_dell_smbios_wmi(void);
+void exit_dell_smbios_wmi(void);
+#else /* CONFIG_DELL_SMBIOS_WMI */
+static inline int init_dell_smbios_wmi(void)
+{
+	return -ENODEV;
+}
+static inline void exit_dell_smbios_wmi(void)
+{}
+#endif /* CONFIG_DELL_SMBIOS_WMI */
+
+#ifdef CONFIG_DELL_SMBIOS_SMM
+int init_dell_smbios_smm(void);
+void exit_dell_smbios_smm(void);
+#else /* CONFIG_DELL_SMBIOS_SMM */
+static inline int init_dell_smbios_smm(void)
+{
+	return -ENODEV;
+}
+static inline void exit_dell_smbios_smm(void)
+{}
+#endif /* CONFIG_DELL_SMBIOS_SMM */
+
+#endif /* _DELL_SMBIOS_H_ */
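The header relies on the usual conditional-stub idiom, which is what lets
dell-smbios-base.c call both backend initializers unconditionally: a backend that
was not built in simply reports -ENODEV, and the base driver fails only when
every backend does. In miniature (the CONFIG symbol and names here are invented
for illustration):

	/* Stub idiom sketch; CONFIG_FOO_BACKEND is hypothetical. */
	#ifdef CONFIG_FOO_BACKEND
	int init_foo_backend(void);
	void exit_foo_backend(void);
	#else
	static inline int init_foo_backend(void)
	{
		return -ENODEV;	/* "not built in", distinct from runtime errors */
	}
	static inline void exit_foo_backend(void) {}
	#endif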
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 2c9927430d85..8d102195a392 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -714,7 +714,7 @@ static int __init dell_wmi_init(void)
 
 	return wmi_driver_register(&dell_wmi_driver);
 }
-module_init(dell_wmi_init);
+late_initcall(dell_wmi_init);

 static void __exit dell_wmi_exit(void)
 {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index dd4708c58480..1fc0c0811da4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4310,7 +4310,7 @@ static int _regulator_resume_early(struct device *dev, void *data)
 
 	rstate = regulator_get_suspend_state(rdev, *state);
 	if (rstate == NULL)
-		return -EINVAL;
+		return 0;

 	mutex_lock(&rdev->mutex);

diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index 72c8b3e1022b..e0a9c445ed67 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev)
 	 * arbitrary timeout.
 	 */
 	ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val,
-				 !(val & STM32_VRR), 650, 10000);
+				 val & STM32_VRR, 650, 10000);
 	if (ret) {
 		dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n");
 		val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
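The small-looking change above inverts the poll condition: readl_poll_timeout()
spins until its condition evaluates true, and VRR is a "voltage reference ready"
flag, so the enable path must wait for the bit to become set, not cleared; the
old !(val & STM32_VRR) condition returned as soon as the buffer was not ready.
The general shape, with made-up register and bit names standing in for the
driver's macros:

	#include <linux/iopoll.h>

	/* REG_STATUS and READY_BIT are placeholders, not the stm32 macros. */
	static int wait_until_ready(void __iomem *base)
	{
		u32 val;

		/* success condition is "ready bit set"; 650us poll, 10ms timeout */
		return readl_poll_timeout(base + REG_STATUS, val,
					  val & READY_BIT, 650, 10000);
	}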
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index a7c15f0085e2..ecef8e73d40b 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2581,8 +2581,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
 	case DASD_CQR_QUEUED:
 		/* request was not started - just set to cleared */
 		cqr->status = DASD_CQR_CLEARED;
-		if (cqr->callback_data == DASD_SLEEPON_START_TAG)
-			cqr->callback_data = DASD_SLEEPON_END_TAG;
 		break;
 	case DASD_CQR_IN_IO:
 		/* request in IO - terminate IO and release again */
@@ -3902,9 +3900,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
 		wait_event(dasd_flush_wq,
 			   (cqr->status != DASD_CQR_CLEAR_PENDING));

-		/* mark sleepon requests as ended */
-		if (cqr->callback_data == DASD_SLEEPON_START_TAG)
-			cqr->callback_data = DASD_SLEEPON_END_TAG;
+		/*
+		 * requeue requests to blocklayer will only work
+		 * for block device requests
+		 */
+		if (_dasd_requeue_request(cqr))
+			continue;

 		/* remove requests from device and block queue */
 		list_del_init(&cqr->devlist);
@@ -3917,13 +3918,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
 			cqr = refers;
 		}

-		/*
-		 * requeue requests to blocklayer will only work
-		 * for block device requests
-		 */
-		if (_dasd_requeue_request(cqr))
-			continue;
-
 		if (cqr->block)
 			list_del_init(&cqr->blocklist);
 		cqr->block->base->discipline->free_cp(
@@ -3940,8 +3934,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device)
 		list_splice_tail(&requeue_queue, &device->ccw_queue);
 		spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	}
-	/* wake up generic waitqueue for eventually ended sleepon requests */
-	wake_up(&generic_waitq);
+	dasd_schedule_device_bh(device);
 	return rc;
 }
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1319122e9d12..9169af7dbb43 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -795,6 +795,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 
 	ccw_device_set_timeout(cdev, 0);
 	cdev->private->iretry = 255;
+	cdev->private->async_kill_io_rc = -ETIMEDOUT;
 	ret = ccw_device_cancel_halt_clear(cdev);
 	if (ret == -EBUSY) {
 		ccw_device_set_timeout(cdev, 3*HZ);
@@ -871,7 +872,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	/* OK, i/o is dead now. Call interrupt handler. */
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
+			      ERR_PTR(cdev->private->async_kill_io_rc));
 }

 static void
@@ -888,14 +889,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 	ccw_device_online_verify(cdev, 0);
 	if (cdev->handler)
 		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
+			      ERR_PTR(cdev->private->async_kill_io_rc));
 }

 void ccw_device_kill_io(struct ccw_device *cdev)
 {
 	int ret;

+	ccw_device_set_timeout(cdev, 0);
 	cdev->private->iretry = 255;
+	cdev->private->async_kill_io_rc = -EIO;
 	ret = ccw_device_cancel_halt_clear(cdev);
 	if (ret == -EBUSY) {
 		ccw_device_set_timeout(cdev, 3*HZ);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 1caf6a398760..75ce12a24dc2 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -159,7 +159,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
 }

 /**
- * ccw_device_start_key() - start a s390 channel program with key
+ * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
  * @cdev: target ccw device
  * @cpa: logical start address of channel program
  * @intparm: user specific interruption parameter; will be presented back to
@@ -170,10 +170,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
  * @key: storage key to be used for the I/O
  * @flags: additional flags; defines the action to be performed for I/O
  *	   processing.
+ * @expires: timeout value in jiffies
  *
  * Start a S/390 channel program. When the interrupt arrives, the
  * IRQ handler is called, either immediately, delayed (dev-end missing,
  * or sense required) or never (no IRQ handler registered).
+ * This function notifies the device driver if the channel program has not
+ * completed during the time specified by @expires. If a timeout occurs, the
+ * channel program is terminated via xsch, hsch or csch, and the device's
+ * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
  * Returns:
  *  %0, if the operation was successful;
  *  -%EBUSY, if the device is busy, or status pending;
@@ -182,9 +187,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
  * Context:
  *  Interrupts disabled, ccw device lock held
  */
-int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
-			 unsigned long intparm, __u8 lpm, __u8 key,
-			 unsigned long flags)
+int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
+				 unsigned long intparm, __u8 lpm, __u8 key,
+				 unsigned long flags, int expires)
 {
 	struct subchannel *sch;
 	int ret;
@@ -224,6 +229,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 	switch (ret) {
 	case 0:
 		cdev->private->intparm = intparm;
+		if (expires)
+			ccw_device_set_timeout(cdev, expires);
 		break;
 	case -EACCES:
 	case -ENODEV:
@@ -234,7 +241,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 }

 /**
- * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
+ * ccw_device_start_key() - start a s390 channel program with key
  * @cdev: target ccw device
  * @cpa: logical start address of channel program
  * @intparm: user specific interruption parameter; will be presented back to
@@ -245,15 +252,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
  * @key: storage key to be used for the I/O
  * @flags: additional flags; defines the action to be performed for I/O
  *	   processing.
- * @expires: timeout value in jiffies
  *
  * Start a S/390 channel program. When the interrupt arrives, the
  * IRQ handler is called, either immediately, delayed (dev-end missing,
  * or sense required) or never (no IRQ handler registered).
- * This function notifies the device driver if the channel program has not
- * completed during the time specified by @expires. If a timeout occurs, the
- * channel program is terminated via xsch, hsch or csch, and the device's
- * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
  * Returns:
  *  %0, if the operation was successful;
  *  -%EBUSY, if the device is busy, or status pending;
@@ -262,19 +264,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
  * Context:
  *  Interrupts disabled, ccw device lock held
  */
-int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
-				 unsigned long intparm, __u8 lpm, __u8 key,
-				 unsigned long flags, int expires)
+int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
+			 unsigned long intparm, __u8 lpm, __u8 key,
+			 unsigned long flags)
 {
-	int ret;
-
-	if (!cdev)
-		return -ENODEV;
-	ccw_device_set_timeout(cdev, expires);
-	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
-	if (ret != 0)
-		ccw_device_set_timeout(cdev, 0);
-	return ret;
+	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
+					    flags, 0);
 }

 /**
@@ -489,18 +484,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
 EXPORT_SYMBOL(ccw_device_get_id);

 /**
- * ccw_device_tm_start_key() - perform start function
+ * ccw_device_tm_start_timeout_key() - perform start function
  * @cdev: ccw device on which to perform the start function
  * @tcw: transport-command word to be started
  * @intparm: user defined parameter to be passed to the interrupt handler
  * @lpm: mask of paths to use
  * @key: storage key to use for storage access
+ * @expires: time span in jiffies after which to abort request
  *
  * Start the tcw on the given ccw device. Return zero on success, non-zero
  * otherwise.
  */
-int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
-			    unsigned long intparm, u8 lpm, u8 key)
+int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
+				    unsigned long intparm, u8 lpm, u8 key,
+				    int expires)
 {
 	struct subchannel *sch;
 	int rc;
@@ -527,37 +524,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
 		return -EACCES;
 	}
 	rc = cio_tm_start_key(sch, tcw, lpm, key);
-	if (rc == 0)
+	if (rc == 0) {
 		cdev->private->intparm = intparm;
+		if (expires)
+			ccw_device_set_timeout(cdev, expires);
+	}
 	return rc;
 }
-EXPORT_SYMBOL(ccw_device_tm_start_key);
+EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);

 /**
- * ccw_device_tm_start_timeout_key() - perform start function
+ * ccw_device_tm_start_key() - perform start function
  * @cdev: ccw device on which to perform the start function
  * @tcw: transport-command word to be started
  * @intparm: user defined parameter to be passed to the interrupt handler
  * @lpm: mask of paths to use
  * @key: storage key to use for storage access
- * @expires: time span in jiffies after which to abort request
  *
  * Start the tcw on the given ccw device. Return zero on success, non-zero
  * otherwise.
  */
-int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
-				    unsigned long intparm, u8 lpm, u8 key,
-				    int expires)
+int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
+			    unsigned long intparm, u8 lpm, u8 key)
 {
-	int ret;
-
-	ccw_device_set_timeout(cdev, expires);
-	ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
-	if (ret != 0)
-		ccw_device_set_timeout(cdev, 0);
-	return ret;
+	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
 }
-EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
+EXPORT_SYMBOL(ccw_device_tm_start_key);

 /**
  * ccw_device_tm_start() - perform start function
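The reordering above is more than a doc-comment swap: the _timeout_key variants
become the one real implementation, arming the timer only after a successful
start (and only when expires is non-zero), while the plain variants shrink to
trivial wrappers that pass expires == 0. Reduced to its essentials with
hypothetical names:

	/* Sketch of the wrapper pattern; the foo_* names are illustrative. */
	int foo_start_timeout(struct foo_dev *dev, int expires)
	{
		int rc = foo_do_start(dev);

		if (rc == 0 && expires)
			foo_set_timeout(dev, expires);	/* arm only on success */
		return rc;
	}

	int foo_start(struct foo_dev *dev)
	{
		return foo_start_timeout(dev, 0);	/* 0 == no timeout */
	}

This removes the window in the old wrappers where the timer was armed before the
start had actually succeeded.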
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index af571d8d6925..90e4e3a7841b 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -157,6 +157,7 @@ struct ccw_device_private {
157 unsigned long intparm; /* user interruption parameter */ 157 unsigned long intparm; /* user interruption parameter */
158 struct qdio_irq *qdio_data; 158 struct qdio_irq *qdio_data;
159 struct irb irb; /* device status */ 159 struct irb irb; /* device status */
160 int async_kill_io_rc;
160 struct senseid senseid; /* SenseID info */ 161 struct senseid senseid; /* SenseID info */
161 struct pgid pgid[8]; /* path group IDs per chpid*/ 162 struct pgid pgid[8]; /* path group IDs per chpid*/
162 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ 163 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8c97ce2516bb..19203340f879 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -527,8 +527,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
 	       queue == card->qdio.no_in_queues - 1;
 }

-
-static int qeth_issue_next_read(struct qeth_card *card)
+static int __qeth_issue_next_read(struct qeth_card *card)
 {
 	int rc;
 	struct qeth_cmd_buffer *iob;
@@ -559,6 +558,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
 	return rc;
 }

+static int qeth_issue_next_read(struct qeth_card *card)
+{
+	int ret;
+
+	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+	ret = __qeth_issue_next_read(card);
+	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+
+	return ret;
+}
+
 static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 {
 	struct qeth_reply *reply;
@@ -957,7 +967,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
 	spin_lock_irqsave(&card->thread_mask_lock, flags);
 	card->thread_running_mask &= ~thread;
 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
-	wake_up(&card->wait_q);
+	wake_up_all(&card->wait_q);
 }
 EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);

@@ -1161,6 +1171,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 	}
 	rc = qeth_get_problem(cdev, irb);
 	if (rc) {
+		card->read_or_write_problem = 1;
 		qeth_clear_ipacmd_list(card);
 		qeth_schedule_recovery(card);
 		goto out;
@@ -1179,7 +1190,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 		return;
 	if (channel == &card->read &&
 	    channel->state == CH_STATE_UP)
-		qeth_issue_next_read(card);
+		__qeth_issue_next_read(card);

 	iob = channel->iob;
 	index = channel->buf_no;
@@ -5083,8 +5094,6 @@ static void qeth_core_free_card(struct qeth_card *card)
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 	qeth_clean_channel(&card->read);
 	qeth_clean_channel(&card->write);
-	if (card->dev)
-		free_netdev(card->dev);
 	qeth_free_qdio_buffers(card);
 	unregister_service_level(&card->qeth_service_level);
 	kfree(card);
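The __qeth_issue_next_read()/qeth_issue_next_read() split follows the kernel's
customary double-underscore locking convention: the __ variant assumes the caller
already holds the ccw device lock (as the IRQ handler does), while the plain
variant takes the lock for everyone else. Schematically (names invented for the
sketch, not qeth's real API):

	/* Locking-convention sketch; foo_* is hypothetical. */
	static int __foo_issue(struct foo_card *card)
	{
		/* caller must hold get_ccwdev_lock(card->cdev) */
		return foo_start_read(card);
	}

	static int foo_issue(struct foo_card *card)
	{
		int ret;

		spin_lock_irq(get_ccwdev_lock(card->cdev));
		ret = __foo_issue(card);
		spin_unlock_irq(get_ccwdev_lock(card->cdev));

		return ret;
	}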
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8f5babdccb42..50a313806dde 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -913,8 +913,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
913 qeth_l2_set_offline(cgdev); 913 qeth_l2_set_offline(cgdev);
914 914
915 if (card->dev) { 915 if (card->dev) {
916 netif_napi_del(&card->napi);
917 unregister_netdev(card->dev); 916 unregister_netdev(card->dev);
917 free_netdev(card->dev);
918 card->dev = NULL; 918 card->dev = NULL;
919 } 919 }
920 return; 920 return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ef3f548b7d35..c1a16a74aa83 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2713,8 +2713,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2713 qeth_l3_set_offline(cgdev); 2713 qeth_l3_set_offline(cgdev);
2714 2714
2715 if (card->dev) { 2715 if (card->dev) {
2716 netif_napi_del(&card->napi);
2717 unregister_netdev(card->dev); 2716 unregister_netdev(card->dev);
2717 free_netdev(card->dev);
2718 card->dev = NULL; 2718 card->dev = NULL;
2719 } 2719 }
2720 2720
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 57bf43e34863..dd9464920456 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -328,8 +328,6 @@ static void scsi_host_dev_release(struct device *dev)
 	if (shost->work_q)
 		destroy_workqueue(shost->work_q);

-	destroy_rcu_head(&shost->rcu);
-
 	if (shost->shost_state == SHOST_CREATED) {
 		/*
 		 * Free the shost_dev device name here if scsi_host_alloc()
@@ -404,7 +402,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
 	INIT_LIST_HEAD(&shost->starved_list);
 	init_waitqueue_head(&shost->host_wait);
 	mutex_init(&shost->scan_mutex);
-	init_rcu_head(&shost->rcu);

 	index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
 	if (index < 0)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 6de9681ace82..ceab5e5c41c2 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -223,6 +223,7 @@ out_done:
 static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
 {
 	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+	struct domain_device *dev = cmd_to_domain_dev(cmd);
 	struct sas_task *task = TO_SAS_TASK(cmd);

 	/* At this point, we only get called following an actual abort
@@ -231,6 +232,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
 	 */
 	sas_end_task(cmd, task);

+	if (dev_is_sata(dev)) {
+		/* defer commands to libata so that libata EH can
+		 * handle ata qcs correctly
+		 */
+		list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
+		return;
+	}
+
 	/* now finish the command and move it on to the error
 	 * handler done list, this also takes it off the
 	 * error handler pending list.
@@ -238,22 +247,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
 	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
 }

-static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
-{
-	struct domain_device *dev = cmd_to_domain_dev(cmd);
-	struct sas_ha_struct *ha = dev->port->ha;
-	struct sas_task *task = TO_SAS_TASK(cmd);
-
-	if (!dev_is_sata(dev)) {
-		sas_eh_finish_cmd(cmd);
-		return;
-	}
-
-	/* report the timeout to libata */
-	sas_end_task(cmd, task);
-	list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
-}
-
 static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
 {
 	struct scsi_cmnd *cmd, *n;
@@ -261,7 +254,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
 	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
 		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
 		    cmd->device->lun == my_cmd->device->lun)
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 	}
 }

@@ -631,12 +624,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
 		case TASK_IS_DONE:
 			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
 				    task);
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 			continue;
 		case TASK_IS_ABORTED:
 			SAS_DPRINTK("%s: task 0x%p is aborted\n",
 				    __func__, task);
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 			continue;
 		case TASK_IS_AT_LU:
 			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -647,7 +640,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
 				    "recovered\n",
 				    SAS_ADDR(task->dev),
 				    cmd->device->lun);
-			sas_eh_defer_cmd(cmd);
+			sas_eh_finish_cmd(cmd);
 			sas_scsi_clear_queue_lu(work_q, cmd);
 			goto Again;
 		}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 073ced07e662..dc8e850fbfd2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -216,36 +216,30 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
216/** 216/**
217 * megasas_fire_cmd_fusion - Sends command to the FW 217 * megasas_fire_cmd_fusion - Sends command to the FW
218 * @instance: Adapter soft state 218 * @instance: Adapter soft state
219 * @req_desc: 32bit or 64bit Request descriptor 219 * @req_desc: 64bit Request descriptor
220 * 220 *
221 * Perform PCI Write. Ventura supports 32 bit Descriptor. 221 * Perform PCI Write.
222 * Prior to Ventura (12G) MR controller supports 64 bit Descriptor.
223 */ 222 */
224 223
225static void 224static void
226megasas_fire_cmd_fusion(struct megasas_instance *instance, 225megasas_fire_cmd_fusion(struct megasas_instance *instance,
227 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) 226 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
228{ 227{
229 if (instance->adapter_type == VENTURA_SERIES)
230 writel(le32_to_cpu(req_desc->u.low),
231 &instance->reg_set->inbound_single_queue_port);
232 else {
233#if defined(writeq) && defined(CONFIG_64BIT) 228#if defined(writeq) && defined(CONFIG_64BIT)
234 u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | 229 u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
235 le32_to_cpu(req_desc->u.low)); 230 le32_to_cpu(req_desc->u.low));
236 231
237 writeq(req_data, &instance->reg_set->inbound_low_queue_port); 232 writeq(req_data, &instance->reg_set->inbound_low_queue_port);
238#else 233#else
239 unsigned long flags; 234 unsigned long flags;
240 spin_lock_irqsave(&instance->hba_lock, flags); 235 spin_lock_irqsave(&instance->hba_lock, flags);
241 writel(le32_to_cpu(req_desc->u.low), 236 writel(le32_to_cpu(req_desc->u.low),
242 &instance->reg_set->inbound_low_queue_port); 237 &instance->reg_set->inbound_low_queue_port);
243 writel(le32_to_cpu(req_desc->u.high), 238 writel(le32_to_cpu(req_desc->u.high),
244 &instance->reg_set->inbound_high_queue_port); 239 &instance->reg_set->inbound_high_queue_port);
245 mmiowb(); 240 mmiowb();
246 spin_unlock_irqrestore(&instance->hba_lock, flags); 241 spin_unlock_irqrestore(&instance->hba_lock, flags);
247#endif 242#endif
248 }
249} 243}
250 244
251/** 245/**
@@ -982,7 +976,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
982 const char *sys_info; 976 const char *sys_info;
983 MFI_CAPABILITIES *drv_ops; 977 MFI_CAPABILITIES *drv_ops;
984 u32 scratch_pad_2; 978 u32 scratch_pad_2;
985 unsigned long flags;
986 ktime_t time; 979 ktime_t time;
987 bool cur_fw_64bit_dma_capable; 980 bool cur_fw_64bit_dma_capable;
988 981
@@ -1121,14 +1114,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
1121 break; 1114 break;
1122 } 1115 }
1123 1116
1124 /* For Ventura also IOC INIT required 64 bit Descriptor write. */ 1117 megasas_fire_cmd_fusion(instance, &req_desc);
1125 spin_lock_irqsave(&instance->hba_lock, flags);
1126 writel(le32_to_cpu(req_desc.u.low),
1127 &instance->reg_set->inbound_low_queue_port);
1128 writel(le32_to_cpu(req_desc.u.high),
1129 &instance->reg_set->inbound_high_queue_port);
1130 mmiowb();
1131 spin_unlock_irqrestore(&instance->hba_lock, flags);
1132 1118
1133 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); 1119 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
1134 1120
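
The two megaraid hunks above are one fix: the 32-bit single-queue-port shortcut for Ventura is dropped, so every adapter now posts the full 64-bit request descriptor through megasas_fire_cmd_fusion(), and megasas_ioc_init_fusion() reuses that helper instead of open-coding the locked writel pair. The underlying pattern is a 64-bit MMIO doorbell: one atomic writeq where available, otherwise two 32-bit writes made atomic with a spinlock. A minimal sketch of that pattern, with demo_regs/demo_fire and demo_hba_lock as illustrative names rather than the driver's real structures:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_regs {
	u32 inbound_low;	/* low half of the doorbell */
	u32 inbound_high;	/* adjacent high half */
};

static DEFINE_SPINLOCK(demo_hba_lock);

static void demo_fire(struct demo_regs __iomem *regs, u64 desc)
{
#if defined(writeq) && defined(CONFIG_64BIT)
	/* One 64-bit MMIO transaction: nothing can interleave. */
	writeq(desc, &regs->inbound_low);
#else
	unsigned long flags;

	/* Two 32-bit writes: the lock keeps another CPU's low/high
	 * pair from splitting ours. */
	spin_lock_irqsave(&demo_hba_lock, flags);
	writel(lower_32_bits(desc), &regs->inbound_low);
	writel(upper_32_bits(desc), &regs->inbound_high);
	mmiowb();
	spin_unlock_irqrestore(&demo_hba_lock, flags);
#endif
}
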
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59a87ca328d3..0aafbfd1b746 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -6297,14 +6297,14 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
6297} 6297}
6298 6298
6299/** 6299/**
6300 * _wait_for_commands_to_complete - reset controller 6300 * mpt3sas_wait_for_commands_to_complete - reset controller
6301 * @ioc: Pointer to MPT_ADAPTER structure 6301 * @ioc: Pointer to MPT_ADAPTER structure
6302 * 6302 *
6303 * This function is waiting 10s for all pending commands to complete 6303 * This function is waiting 10s for all pending commands to complete
6304 * prior to putting controller in reset. 6304 * prior to putting controller in reset.
6305 */ 6305 */
6306static void 6306void
6307_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) 6307mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
6308{ 6308{
6309 u32 ioc_state; 6309 u32 ioc_state;
6310 6310
@@ -6377,7 +6377,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
6377 is_fault = 1; 6377 is_fault = 1;
6378 } 6378 }
6379 _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); 6379 _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
6380 _wait_for_commands_to_complete(ioc); 6380 mpt3sas_wait_for_commands_to_complete(ioc);
6381 _base_mask_interrupts(ioc); 6381 _base_mask_interrupts(ioc);
6382 r = _base_make_ioc_ready(ioc, type); 6382 r = _base_make_ioc_ready(ioc, type);
6383 if (r) 6383 if (r)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 789bc421424b..99ccf83b8c51 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1433,6 +1433,9 @@ void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
1433 1433
1434int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); 1434int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
1435 1435
1436void
1437mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
1438
1436 1439
1437/* scsih shared API */ 1440/* scsih shared API */
1438struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, 1441struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 74fca184dba9..a1cb0236c550 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2835,7 +2835,8 @@ scsih_abort(struct scsi_cmnd *scmd)
2835 _scsih_tm_display_info(ioc, scmd); 2835 _scsih_tm_display_info(ioc, scmd);
2836 2836
2837 sas_device_priv_data = scmd->device->hostdata; 2837 sas_device_priv_data = scmd->device->hostdata;
2838 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2838 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2839 ioc->remove_host) {
2839 sdev_printk(KERN_INFO, scmd->device, 2840 sdev_printk(KERN_INFO, scmd->device,
2840 "device been deleted! scmd(%p)\n", scmd); 2841 "device been deleted! scmd(%p)\n", scmd);
2841 scmd->result = DID_NO_CONNECT << 16; 2842 scmd->result = DID_NO_CONNECT << 16;
@@ -2898,7 +2899,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
2898 _scsih_tm_display_info(ioc, scmd); 2899 _scsih_tm_display_info(ioc, scmd);
2899 2900
2900 sas_device_priv_data = scmd->device->hostdata; 2901 sas_device_priv_data = scmd->device->hostdata;
2901 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2902 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2903 ioc->remove_host) {
2902 sdev_printk(KERN_INFO, scmd->device, 2904 sdev_printk(KERN_INFO, scmd->device,
2903 "device been deleted! scmd(%p)\n", scmd); 2905 "device been deleted! scmd(%p)\n", scmd);
2904 scmd->result = DID_NO_CONNECT << 16; 2906 scmd->result = DID_NO_CONNECT << 16;
@@ -2961,7 +2963,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
2961 _scsih_tm_display_info(ioc, scmd); 2963 _scsih_tm_display_info(ioc, scmd);
2962 2964
2963 sas_device_priv_data = scmd->device->hostdata; 2965 sas_device_priv_data = scmd->device->hostdata;
2964 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 2966 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2967 ioc->remove_host) {
2965 starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", 2968 starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
2966 scmd); 2969 scmd);
2967 scmd->result = DID_NO_CONNECT << 16; 2970 scmd->result = DID_NO_CONNECT << 16;
@@ -3019,7 +3022,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
3019 ioc->name, scmd); 3022 ioc->name, scmd);
3020 scsi_print_command(scmd); 3023 scsi_print_command(scmd);
3021 3024
3022 if (ioc->is_driver_loading) { 3025 if (ioc->is_driver_loading || ioc->remove_host) {
3023 pr_info(MPT3SAS_FMT "Blocking the host reset\n", 3026 pr_info(MPT3SAS_FMT "Blocking the host reset\n",
3024 ioc->name); 3027 ioc->name);
3025 r = FAILED; 3028 r = FAILED;
@@ -4453,7 +4456,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4453 st = scsi_cmd_priv(scmd); 4456 st = scsi_cmd_priv(scmd);
4454 mpt3sas_base_clear_st(ioc, st); 4457 mpt3sas_base_clear_st(ioc, st);
4455 scsi_dma_unmap(scmd); 4458 scsi_dma_unmap(scmd);
4456 if (ioc->pci_error_recovery) 4459 if (ioc->pci_error_recovery || ioc->remove_host)
4457 scmd->result = DID_NO_CONNECT << 16; 4460 scmd->result = DID_NO_CONNECT << 16;
4458 else 4461 else
4459 scmd->result = DID_RESET << 16; 4462 scmd->result = DID_RESET << 16;
@@ -9739,6 +9742,10 @@ static void scsih_remove(struct pci_dev *pdev)
9739 unsigned long flags; 9742 unsigned long flags;
9740 9743
9741 ioc->remove_host = 1; 9744 ioc->remove_host = 1;
9745
9746 mpt3sas_wait_for_commands_to_complete(ioc);
9747 _scsih_flush_running_cmds(ioc);
9748
9742 _scsih_fw_event_cleanup_queue(ioc); 9749 _scsih_fw_event_cleanup_queue(ioc);
9743 9750
9744 spin_lock_irqsave(&ioc->fw_event_lock, flags); 9751 spin_lock_irqsave(&ioc->fw_event_lock, flags);
@@ -9815,6 +9822,10 @@ scsih_shutdown(struct pci_dev *pdev)
9815 unsigned long flags; 9822 unsigned long flags;
9816 9823
9817 ioc->remove_host = 1; 9824 ioc->remove_host = 1;
9825
9826 mpt3sas_wait_for_commands_to_complete(ioc);
9827 _scsih_flush_running_cmds(ioc);
9828
9818 _scsih_fw_event_cleanup_queue(ioc); 9829 _scsih_fw_event_cleanup_queue(ioc);
9819 9830
9820 spin_lock_irqsave(&ioc->fw_event_lock, flags); 9831 spin_lock_irqsave(&ioc->fw_event_lock, flags);
@@ -10547,7 +10558,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10547 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 10558 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10548 "fw_event_%s%d", ioc->driver_name, ioc->id); 10559 "fw_event_%s%d", ioc->driver_name, ioc->id);
10549 ioc->firmware_event_thread = alloc_ordered_workqueue( 10560 ioc->firmware_event_thread = alloc_ordered_workqueue(
10550 ioc->firmware_event_name, WQ_MEM_RECLAIM); 10561 ioc->firmware_event_name, 0);
10551 if (!ioc->firmware_event_thread) { 10562 if (!ioc->firmware_event_thread) {
10552 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 10563 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
10553 ioc->name, __FILE__, __LINE__, __func__); 10564 ioc->name, __FILE__, __LINE__, __func__);
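
Across the three mpt3sas files this is one change: _wait_for_commands_to_complete() becomes mpt3sas_wait_for_commands_to_complete(), gains a prototype in mpt3sas_base.h, and the remove and shutdown paths reuse it. Unplugging an adapter now follows the same quiesce order the hard-reset handler already used: flag the host as going away (remove_host, which the abort/reset handlers above now check), give in-flight I/O up to 10s to drain, then fail whatever is left via _scsih_flush_running_cmds(), which reports DID_NO_CONNECT when remove_host is set. A rough outline of that ordering, where demo_ioc and its fields are assumptions, not mpt3sas structures:

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

struct demo_ioc {
	int remove_host;		/* checked by EH/abort paths */
	atomic_t pending_ios;		/* in-flight commands */
};

/* Stand-in for _scsih_flush_running_cmds(): fail leftovers fast. */
static void demo_flush_running_cmds(struct demo_ioc *ioc);

static void demo_remove(struct demo_ioc *ioc)
{
	unsigned long timeout = jiffies + 10 * HZ;

	ioc->remove_host = 1;	/* new aborts/resets now bail out early */

	/* Drain politely first: up to 10s, as the reset path does. */
	while (atomic_read(&ioc->pending_ios) &&
	       time_before(jiffies, timeout))
		ssleep(1);

	/* Whatever survived can no longer complete normally. */
	demo_flush_running_cmds(ioc);
}

The probe hunk is independent: the firmware-event workqueue loses WQ_MEM_RECLAIM, a flag reserved for workqueues that must guarantee forward progress during memory reclaim, a path this event queue does not sit on.
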
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 667d7697ba01..d09afe1b567d 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -762,6 +762,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
762 762
763 iscsi_cid = cqe->conn_id; 763 iscsi_cid = cqe->conn_id;
764 qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; 764 qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
765 if (!qedi_conn) {
766 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
767 "icid not found 0x%x\n", cqe->conn_id);
768 return;
769 }
765 770
766 /* Based on this itt get the corresponding qedi_cmd */ 771 /* Based on this itt get the corresponding qedi_cmd */
767 spin_lock_bh(&qedi_conn->tmf_work_lock); 772 spin_lock_bh(&qedi_conn->tmf_work_lock);
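
The qedi hunk is a straightforward guard: the per-cid connection lookup can legitimately return NULL when a cleanup completion races with connection teardown, so the handler now logs and returns instead of dereferencing the NULL pointer a few lines later. The shape of the fix, with demo_* names standing in for the driver's lookup and the rest of the handler:

#include <linux/printk.h>
#include <linux/types.h>

struct demo_conn;

static struct demo_conn *demo_lookup_conn(u32 cid);	/* table lookup */
static void demo_finish_cleanup(struct demo_conn *conn);

static void demo_cleanup_resp(u32 cid)
{
	struct demo_conn *conn = demo_lookup_conn(cid);

	if (!conn) {
		/* stale completion raced with teardown: nothing to do */
		pr_info("icid not found 0x%x\n", cid);
		return;
	}
	demo_finish_cleanup(conn);
}
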
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index be7d6824581a..c9689f97c307 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -261,9 +261,9 @@
261struct name_list_extended { 261struct name_list_extended {
262 struct get_name_list_extended *l; 262 struct get_name_list_extended *l;
263 dma_addr_t ldma; 263 dma_addr_t ldma;
264 struct list_head fcports; /* protect by sess_list */ 264 struct list_head fcports;
265 spinlock_t fcports_lock;
265 u32 size; 266 u32 size;
266 u8 sent;
267}; 267};
268/* 268/*
269 * Timeout timer counts in seconds 269 * Timeout timer counts in seconds
@@ -2217,6 +2217,7 @@ typedef struct {
2217 2217
2218/* FCP-4 types */ 2218/* FCP-4 types */
2219#define FC4_TYPE_FCP_SCSI 0x08 2219#define FC4_TYPE_FCP_SCSI 0x08
2220#define FC4_TYPE_NVME 0x28
2220#define FC4_TYPE_OTHER 0x0 2221#define FC4_TYPE_OTHER 0x0
2221#define FC4_TYPE_UNKNOWN 0xff 2222#define FC4_TYPE_UNKNOWN 0xff
2222 2223
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 5bf9a59432f6..403fa096f8c8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3179,6 +3179,7 @@ done_free_sp:
3179 sp->free(sp); 3179 sp->free(sp);
3180 fcport->flags &= ~FCF_ASYNC_SENT; 3180 fcport->flags &= ~FCF_ASYNC_SENT;
3181done: 3181done:
3182 fcport->flags &= ~FCF_ASYNC_ACTIVE;
3182 return rval; 3183 return rval;
3183} 3184}
3184 3185
@@ -3370,6 +3371,7 @@ done_free_sp:
3370 sp->free(sp); 3371 sp->free(sp);
3371 fcport->flags &= ~FCF_ASYNC_SENT; 3372 fcport->flags &= ~FCF_ASYNC_SENT;
3372done: 3373done:
3374 fcport->flags &= ~FCF_ASYNC_ACTIVE;
3373 return rval; 3375 return rval;
3374} 3376}
3375 3377
@@ -3971,6 +3973,9 @@ out:
3971 spin_lock_irqsave(&vha->work_lock, flags); 3973 spin_lock_irqsave(&vha->work_lock, flags);
3972 vha->scan.scan_flags &= ~SF_SCANNING; 3974 vha->scan.scan_flags &= ~SF_SCANNING;
3973 spin_unlock_irqrestore(&vha->work_lock, flags); 3975 spin_unlock_irqrestore(&vha->work_lock, flags);
3976
3977 if ((fc4type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled)
3978 qla24xx_async_gpnft(vha, FC4_TYPE_NVME);
3974} 3979}
3975 3980
3976static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) 3981static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 2dea1129d396..8d7fab3cd01d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -213,6 +213,7 @@ done_free_sp:
213 sp->free(sp); 213 sp->free(sp);
214 fcport->flags &= ~FCF_ASYNC_SENT; 214 fcport->flags &= ~FCF_ASYNC_SENT;
215done: 215done:
216 fcport->flags &= ~FCF_ASYNC_ACTIVE;
216 return rval; 217 return rval;
217} 218}
218 219
@@ -263,7 +264,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
263done_free_sp: 264done_free_sp:
264 sp->free(sp); 265 sp->free(sp);
265done: 266done:
266 fcport->flags &= ~FCF_ASYNC_SENT; 267 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
267 return rval; 268 return rval;
268} 269}
269 270
@@ -271,6 +272,7 @@ void
271qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, 272qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
272 uint16_t *data) 273 uint16_t *data)
273{ 274{
275 fcport->flags &= ~FCF_ASYNC_ACTIVE;
274 /* Don't re-login in target mode */ 276 /* Don't re-login in target mode */
275 if (!fcport->tgt_session) 277 if (!fcport->tgt_session)
276 qla2x00_mark_device_lost(vha, fcport, 1, 0); 278 qla2x00_mark_device_lost(vha, fcport, 1, 0);
@@ -284,6 +286,7 @@ qla2x00_async_prlo_sp_done(void *s, int res)
284 struct srb_iocb *lio = &sp->u.iocb_cmd; 286 struct srb_iocb *lio = &sp->u.iocb_cmd;
285 struct scsi_qla_host *vha = sp->vha; 287 struct scsi_qla_host *vha = sp->vha;
286 288
289 sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
287 if (!test_bit(UNLOADING, &vha->dpc_flags)) 290 if (!test_bit(UNLOADING, &vha->dpc_flags))
288 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, 291 qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
289 lio->u.logio.data); 292 lio->u.logio.data);
@@ -322,6 +325,7 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
322done_free_sp: 325done_free_sp:
323 sp->free(sp); 326 sp->free(sp);
324done: 327done:
328 fcport->flags &= ~FCF_ASYNC_ACTIVE;
325 return rval; 329 return rval;
326} 330}
327 331
@@ -375,6 +379,8 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
375 "Async done-%s res %x %8phC\n", 379 "Async done-%s res %x %8phC\n",
376 sp->name, res, sp->fcport->port_name); 380 sp->name, res, sp->fcport->port_name);
377 381
382 sp->fcport->flags &= ~FCF_ASYNC_SENT;
383
378 memset(&ea, 0, sizeof(ea)); 384 memset(&ea, 0, sizeof(ea));
379 ea.event = FCME_ADISC_DONE; 385 ea.event = FCME_ADISC_DONE;
380 ea.rc = res; 386 ea.rc = res;
@@ -425,7 +431,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
425done_free_sp: 431done_free_sp:
426 sp->free(sp); 432 sp->free(sp);
427done: 433done:
428 fcport->flags &= ~FCF_ASYNC_SENT; 434 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
429 qla2x00_post_async_adisc_work(vha, fcport, data); 435 qla2x00_post_async_adisc_work(vha, fcport, data);
430 return rval; 436 return rval;
431} 437}
@@ -643,8 +649,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
643 (loop_id & 0x7fff)); 649 (loop_id & 0x7fff));
644 } 650 }
645 651
646 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 652 spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
647 vha->gnl.sent = 0;
648 653
649 INIT_LIST_HEAD(&h); 654 INIT_LIST_HEAD(&h);
650 fcport = tf = NULL; 655 fcport = tf = NULL;
@@ -653,12 +658,16 @@ qla24xx_async_gnl_sp_done(void *s, int res)
653 658
654 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { 659 list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
655 list_del_init(&fcport->gnl_entry); 660 list_del_init(&fcport->gnl_entry);
661 spin_lock(&vha->hw->tgt.sess_lock);
656 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); 662 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
663 spin_unlock(&vha->hw->tgt.sess_lock);
657 ea.fcport = fcport; 664 ea.fcport = fcport;
658 665
659 qla2x00_fcport_event_handler(vha, &ea); 666 qla2x00_fcport_event_handler(vha, &ea);
660 } 667 }
668 spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
661 669
670 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
662 /* create new fcport if fw has knowledge of new sessions */ 671 /* create new fcport if fw has knowledge of new sessions */
663 for (i = 0; i < n; i++) { 672 for (i = 0; i < n; i++) {
664 port_id_t id; 673 port_id_t id;
@@ -710,18 +719,21 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
710 ql_dbg(ql_dbg_disc, vha, 0x20d9, 719 ql_dbg(ql_dbg_disc, vha, 0x20d9,
711 "Async-gnlist WWPN %8phC \n", fcport->port_name); 720 "Async-gnlist WWPN %8phC \n", fcport->port_name);
712 721
713 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 722 spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
723 if (!list_empty(&fcport->gnl_entry)) {
724 spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
725 rval = QLA_SUCCESS;
726 goto done;
727 }
728
729 spin_lock(&vha->hw->tgt.sess_lock);
714 fcport->disc_state = DSC_GNL; 730 fcport->disc_state = DSC_GNL;
715 fcport->last_rscn_gen = fcport->rscn_gen; 731 fcport->last_rscn_gen = fcport->rscn_gen;
716 fcport->last_login_gen = fcport->login_gen; 732 fcport->last_login_gen = fcport->login_gen;
733 spin_unlock(&vha->hw->tgt.sess_lock);
717 734
718 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); 735 list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
719 if (vha->gnl.sent) { 736 spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
720 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
721 return QLA_SUCCESS;
722 }
723 vha->gnl.sent = 1;
724 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
725 737
726 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 738 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
727 if (!sp) 739 if (!sp)
@@ -1049,6 +1061,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1049 fc_port_t *fcport = ea->fcport; 1061 fc_port_t *fcport = ea->fcport;
1050 struct port_database_24xx *pd; 1062 struct port_database_24xx *pd;
1051 struct srb *sp = ea->sp; 1063 struct srb *sp = ea->sp;
1064 uint8_t ls;
1052 1065
1053 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; 1066 pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
1054 1067
@@ -1061,7 +1074,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1061 if (fcport->disc_state == DSC_DELETE_PEND) 1074 if (fcport->disc_state == DSC_DELETE_PEND)
1062 return; 1075 return;
1063 1076
1064 switch (pd->current_login_state) { 1077 if (fcport->fc4f_nvme)
1078 ls = pd->current_login_state >> 4;
1079 else
1080 ls = pd->current_login_state & 0xf;
1081
1082 switch (ls) {
1065 case PDS_PRLI_COMPLETE: 1083 case PDS_PRLI_COMPLETE:
1066 __qla24xx_parse_gpdb(vha, fcport, pd); 1084 __qla24xx_parse_gpdb(vha, fcport, pd);
1067 break; 1085 break;
@@ -1151,8 +1169,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1151 if (fcport->scan_state != QLA_FCPORT_FOUND) 1169 if (fcport->scan_state != QLA_FCPORT_FOUND)
1152 return 0; 1170 return 0;
1153 1171
1154 if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || 1172 if ((fcport->loop_id != FC_NO_LOOP_ID) &&
1155 (fcport->fw_login_state == DSC_LS_PRLI_PEND)) 1173 ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1174 (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
1156 return 0; 1175 return 0;
1157 1176
1158 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { 1177 if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
@@ -1527,6 +1546,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
1527 srb_t *sp = ptr; 1546 srb_t *sp = ptr;
1528 struct srb_iocb *abt = &sp->u.iocb_cmd; 1547 struct srb_iocb *abt = &sp->u.iocb_cmd;
1529 1548
1549 del_timer(&sp->u.iocb_cmd.timer);
1530 complete(&abt->u.abt.comp); 1550 complete(&abt->u.abt.comp);
1531} 1551}
1532 1552
@@ -1699,7 +1719,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1699 1719
1700 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); 1720 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
1701 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1721 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1702 ea->fcport->loop_id = FC_NO_LOOP_ID;
1703 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 1722 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1704 ea->fcport->logout_on_delete = 1; 1723 ea->fcport->logout_on_delete = 1;
1705 ea->fcport->send_els_logo = 0; 1724 ea->fcport->send_els_logo = 0;
@@ -1791,6 +1810,7 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1791 qla2x00_mark_device_lost(vha, fcport, 1, 0); 1810 qla2x00_mark_device_lost(vha, fcport, 1, 0);
1792 qlt_logo_completion_handler(fcport, data[0]); 1811 qlt_logo_completion_handler(fcport, data[0]);
1793 fcport->login_gen++; 1812 fcport->login_gen++;
1813 fcport->flags &= ~FCF_ASYNC_ACTIVE;
1794 return; 1814 return;
1795} 1815}
1796 1816
@@ -1798,6 +1818,7 @@ void
1798qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, 1818qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1799 uint16_t *data) 1819 uint16_t *data)
1800{ 1820{
1821 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1801 if (data[0] == MBS_COMMAND_COMPLETE) { 1822 if (data[0] == MBS_COMMAND_COMPLETE) {
1802 qla2x00_update_fcport(vha, fcport); 1823 qla2x00_update_fcport(vha, fcport);
1803 1824
@@ -1805,7 +1826,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
1805 } 1826 }
1806 1827
1807 /* Retry login. */ 1828 /* Retry login. */
1808 fcport->flags &= ~FCF_ASYNC_SENT;
1809 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 1829 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
1810 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1830 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1811 else 1831 else
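
Most of the qla2xxx churn above serves two themes. First, the gnl.sent flag is replaced by the new vha->gnl.fcports_lock (added in qla_def.h, initialized in qla_os.c): the gnl list gets its own lock, a port is queued at most once via the list_empty(&fcport->gnl_entry) test, and the heavier tgt.sess_lock is taken only around the short per-port state updates instead of the whole list walk. Second, FCF_ASYNC_ACTIVE is now cleared on every exit path of the async login/logout/ADISC/PRLO helpers, error paths included, so a port can no longer be left looking permanently busy. A sketch of the queue-at-most-once idiom, with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_port {
	struct list_head gnl_entry;	/* INIT_LIST_HEAD'ed at creation */
};

static LIST_HEAD(demo_gnl_list);
static DEFINE_SPINLOCK(demo_gnl_lock);	/* guards demo_gnl_list only */

/* Queue @p at most once; relies on removal using list_del_init()
 * (not list_del()) so that list_empty() stays meaningful. */
static bool demo_gnl_queue(struct demo_port *p)
{
	unsigned long flags;
	bool queued = false;

	spin_lock_irqsave(&demo_gnl_lock, flags);
	if (list_empty(&p->gnl_entry)) {
		list_add_tail(&p->gnl_entry, &demo_gnl_list);
		queued = true;
	}
	spin_unlock_irqrestore(&demo_gnl_lock, flags);

	return queued;
}
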
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index afcb5567998a..5c5dcca4d1da 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -454,7 +454,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
454 ha->req_q_map[0] = req; 454 ha->req_q_map[0] = req;
455 set_bit(0, ha->rsp_qid_map); 455 set_bit(0, ha->rsp_qid_map);
456 set_bit(0, ha->req_qid_map); 456 set_bit(0, ha->req_qid_map);
457 return 1; 457 return 0;
458 458
459fail_qpair_map: 459fail_qpair_map:
460 kfree(ha->base_qpair); 460 kfree(ha->base_qpair);
@@ -471,6 +471,9 @@ fail_req_map:
471 471
472static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) 472static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
473{ 473{
474 if (!ha->req_q_map)
475 return;
476
474 if (IS_QLAFX00(ha)) { 477 if (IS_QLAFX00(ha)) {
475 if (req && req->ring_fx00) 478 if (req && req->ring_fx00)
476 dma_free_coherent(&ha->pdev->dev, 479 dma_free_coherent(&ha->pdev->dev,
@@ -481,14 +484,17 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
481 (req->length + 1) * sizeof(request_t), 484 (req->length + 1) * sizeof(request_t),
482 req->ring, req->dma); 485 req->ring, req->dma);
483 486
484 if (req) 487 if (req) {
485 kfree(req->outstanding_cmds); 488 kfree(req->outstanding_cmds);
486 489 kfree(req);
487 kfree(req); 490 }
488} 491}
489 492
490static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) 493static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
491{ 494{
495 if (!ha->rsp_q_map)
496 return;
497
492 if (IS_QLAFX00(ha)) { 498 if (IS_QLAFX00(ha)) {
493 if (rsp && rsp->ring) 499 if (rsp && rsp->ring)
494 dma_free_coherent(&ha->pdev->dev, 500 dma_free_coherent(&ha->pdev->dev,
@@ -499,7 +505,8 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
499 (rsp->length + 1) * sizeof(response_t), 505 (rsp->length + 1) * sizeof(response_t),
500 rsp->ring, rsp->dma); 506 rsp->ring, rsp->dma);
501 } 507 }
502 kfree(rsp); 508 if (rsp)
509 kfree(rsp);
503} 510}
504 511
505static void qla2x00_free_queues(struct qla_hw_data *ha) 512static void qla2x00_free_queues(struct qla_hw_data *ha)
@@ -1723,6 +1730,8 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1723 struct qla_tgt_cmd *cmd; 1730 struct qla_tgt_cmd *cmd;
1724 uint8_t trace = 0; 1731 uint8_t trace = 0;
1725 1732
1733 if (!ha->req_q_map)
1734 return;
1726 spin_lock_irqsave(qp->qp_lock_ptr, flags); 1735 spin_lock_irqsave(qp->qp_lock_ptr, flags);
1727 req = qp->req; 1736 req = qp->req;
1728 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1737 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
@@ -3095,14 +3104,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3095 /* Set up the irqs */ 3104 /* Set up the irqs */
3096 ret = qla2x00_request_irqs(ha, rsp); 3105 ret = qla2x00_request_irqs(ha, rsp);
3097 if (ret) 3106 if (ret)
3098 goto probe_hw_failed; 3107 goto probe_failed;
3099 3108
3100 /* Alloc arrays of request and response ring ptrs */ 3109 /* Alloc arrays of request and response ring ptrs */
3101 if (!qla2x00_alloc_queues(ha, req, rsp)) { 3110 if (qla2x00_alloc_queues(ha, req, rsp)) {
3102 ql_log(ql_log_fatal, base_vha, 0x003d, 3111 ql_log(ql_log_fatal, base_vha, 0x003d,
3103 "Failed to allocate memory for queue pointers..." 3112 "Failed to allocate memory for queue pointers..."
3104 "aborting.\n"); 3113 "aborting.\n");
3105 goto probe_init_failed; 3114 goto probe_failed;
3106 } 3115 }
3107 3116
3108 if (ha->mqenable && shost_use_blk_mq(host)) { 3117 if (ha->mqenable && shost_use_blk_mq(host)) {
@@ -3387,15 +3396,6 @@ skip_dpc:
3387 3396
3388 return 0; 3397 return 0;
3389 3398
3390probe_init_failed:
3391 qla2x00_free_req_que(ha, req);
3392 ha->req_q_map[0] = NULL;
3393 clear_bit(0, ha->req_qid_map);
3394 qla2x00_free_rsp_que(ha, rsp);
3395 ha->rsp_q_map[0] = NULL;
3396 clear_bit(0, ha->rsp_qid_map);
3397 ha->max_req_queues = ha->max_rsp_queues = 0;
3398
3399probe_failed: 3399probe_failed:
3400 if (base_vha->timer_active) 3400 if (base_vha->timer_active)
3401 qla2x00_stop_timer(base_vha); 3401 qla2x00_stop_timer(base_vha);
@@ -4508,11 +4508,17 @@ qla2x00_mem_free(struct qla_hw_data *ha)
4508 if (ha->init_cb) 4508 if (ha->init_cb)
4509 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 4509 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
4510 ha->init_cb, ha->init_cb_dma); 4510 ha->init_cb, ha->init_cb_dma);
4511 vfree(ha->optrom_buffer); 4511
4512 kfree(ha->nvram); 4512 if (ha->optrom_buffer)
4513 kfree(ha->npiv_info); 4513 vfree(ha->optrom_buffer);
4514 kfree(ha->swl); 4514 if (ha->nvram)
4515 kfree(ha->loop_id_map); 4515 kfree(ha->nvram);
4516 if (ha->npiv_info)
4517 kfree(ha->npiv_info);
4518 if (ha->swl)
4519 kfree(ha->swl);
4520 if (ha->loop_id_map)
4521 kfree(ha->loop_id_map);
4516 4522
4517 ha->srb_mempool = NULL; 4523 ha->srb_mempool = NULL;
4518 ha->ctx_mempool = NULL; 4524 ha->ctx_mempool = NULL;
@@ -4528,6 +4534,15 @@ qla2x00_mem_free(struct qla_hw_data *ha)
4528 ha->ex_init_cb_dma = 0; 4534 ha->ex_init_cb_dma = 0;
4529 ha->async_pd = NULL; 4535 ha->async_pd = NULL;
4530 ha->async_pd_dma = 0; 4536 ha->async_pd_dma = 0;
4537 ha->loop_id_map = NULL;
4538 ha->npiv_info = NULL;
4539 ha->optrom_buffer = NULL;
4540 ha->swl = NULL;
4541 ha->nvram = NULL;
4542 ha->mctp_dump = NULL;
4543 ha->dcbx_tlv = NULL;
4544 ha->xgmac_data = NULL;
4545 ha->sfp_data = NULL;
4531 4546
4532 ha->s_dma_pool = NULL; 4547 ha->s_dma_pool = NULL;
4533 ha->dl_dma_pool = NULL; 4548 ha->dl_dma_pool = NULL;
@@ -4577,6 +4592,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4577 4592
4578 spin_lock_init(&vha->work_lock); 4593 spin_lock_init(&vha->work_lock);
4579 spin_lock_init(&vha->cmd_list_lock); 4594 spin_lock_init(&vha->cmd_list_lock);
4595 spin_lock_init(&vha->gnl.fcports_lock);
4580 init_waitqueue_head(&vha->fcport_waitQ); 4596 init_waitqueue_head(&vha->fcport_waitQ);
4581 init_waitqueue_head(&vha->vref_waitq); 4597 init_waitqueue_head(&vha->vref_waitq);
4582 4598
@@ -4806,9 +4822,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4806 fcport->d_id = e->u.new_sess.id; 4822 fcport->d_id = e->u.new_sess.id;
4807 fcport->flags |= FCF_FABRIC_DEVICE; 4823 fcport->flags |= FCF_FABRIC_DEVICE;
4808 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 4824 fcport->fw_login_state = DSC_LS_PLOGI_PEND;
4809 if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) 4825 if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) {
4810 fcport->fc4_type = FC4_TYPE_FCP_SCSI; 4826 fcport->fc4_type = FC4_TYPE_FCP_SCSI;
4811 4827 } else if (e->u.new_sess.fc4_type == FC4_TYPE_NVME) {
4828 fcport->fc4_type = FC4_TYPE_OTHER;
4829 fcport->fc4f_nvme = FC4_TYPE_NVME;
4830 }
4812 memcpy(fcport->port_name, e->u.new_sess.port_name, 4831 memcpy(fcport->port_name, e->u.new_sess.port_name,
4813 WWN_SIZE); 4832 WWN_SIZE);
4814 } else { 4833 } else {
@@ -4877,6 +4896,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4877 } 4896 }
4878 qlt_plogi_ack_unref(vha, pla); 4897 qlt_plogi_ack_unref(vha, pla);
4879 } else { 4898 } else {
4899 fc_port_t *dfcp = NULL;
4900
4880 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4901 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4881 tfcp = qla2x00_find_fcport_by_nportid(vha, 4902 tfcp = qla2x00_find_fcport_by_nportid(vha,
4882 &e->u.new_sess.id, 1); 4903 &e->u.new_sess.id, 1);
@@ -4899,11 +4920,13 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4899 default: 4920 default:
4900 fcport->login_pause = 1; 4921 fcport->login_pause = 1;
4901 tfcp->conflict = fcport; 4922 tfcp->conflict = fcport;
4902 qlt_schedule_sess_for_deletion(tfcp); 4923 dfcp = tfcp;
4903 break; 4924 break;
4904 } 4925 }
4905 } 4926 }
4906 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4927 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4928 if (dfcp)
4929 qlt_schedule_sess_for_deletion(tfcp);
4907 4930
4908 wwn = wwn_to_u64(fcport->node_name); 4931 wwn = wwn_to_u64(fcport->node_name);
4909 4932
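
Three separate qla_os.c fixes sit here. qla2x00_alloc_queues() now follows the kernel's 0-on-success convention, with the caller's check flipped and the bespoke probe_init_failed unwinding folded into probe_failed. The free paths gain NULL guards, and qla2x00_mem_free() resets the pointers it frees so a second invocation cannot double-free; the added if (...) guards before kfree()/vfree() are strictly redundant, since both accept NULL, and the pointer reset is the part that matters. Finally, the conflict-session case defers qlt_schedule_sess_for_deletion() until after sess_lock is dropped, matching the qla_target.c locking change below. A minimal sketch of idempotent teardown, with demo_hw as an illustrative stand-in:

#include <linux/slab.h>
#include <linux/vmalloc.h>

struct demo_hw {
	void *nvram;
	void *optrom;
};

/* Safe to call twice: pointers are cleared after freeing, and
 * kfree(NULL)/vfree(NULL) are no-ops anyway. */
static void demo_mem_free(struct demo_hw *ha)
{
	vfree(ha->optrom);
	kfree(ha->nvram);
	ha->optrom = NULL;
	ha->nvram = NULL;
}
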
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 896b2d8bd803..b49ac85f3de2 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1224,10 +1224,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
1224 } 1224 }
1225} 1225}
1226 1226
1227/* ha->tgt.sess_lock supposed to be held on entry */
1228void qlt_schedule_sess_for_deletion(struct fc_port *sess) 1227void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1229{ 1228{
1230 struct qla_tgt *tgt = sess->tgt; 1229 struct qla_tgt *tgt = sess->tgt;
1230 struct qla_hw_data *ha = sess->vha->hw;
1231 unsigned long flags; 1231 unsigned long flags;
1232 1232
1233 if (sess->disc_state == DSC_DELETE_PEND) 1233 if (sess->disc_state == DSC_DELETE_PEND)
@@ -1244,16 +1244,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1244 return; 1244 return;
1245 } 1245 }
1246 1246
1247 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1247 if (sess->deleted == QLA_SESS_DELETED) 1248 if (sess->deleted == QLA_SESS_DELETED)
1248 sess->logout_on_delete = 0; 1249 sess->logout_on_delete = 0;
1249 1250
1250 spin_lock_irqsave(&sess->vha->work_lock, flags);
1251 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 1251 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1252 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1252 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1253 return; 1253 return;
1254 } 1254 }
1255 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; 1255 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
1256 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1256 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1257 1257
1258 sess->disc_state = DSC_DELETE_PEND; 1258 sess->disc_state = DSC_DELETE_PEND;
1259 1259
@@ -1262,13 +1262,10 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
1262 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 1262 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
1263 "Scheduling sess %p for deletion\n", sess); 1263 "Scheduling sess %p for deletion\n", sess);
1264 1264
1265 /* use cancel to push work element through before re-queue */
1266 cancel_work_sync(&sess->del_work);
1267 INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); 1265 INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
1268 queue_work(sess->vha->hw->wq, &sess->del_work); 1266 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
1269} 1267}
1270 1268
1271/* ha->tgt.sess_lock supposed to be held on entry */
1272static void qlt_clear_tgt_db(struct qla_tgt *tgt) 1269static void qlt_clear_tgt_db(struct qla_tgt *tgt)
1273{ 1270{
1274 struct fc_port *sess; 1271 struct fc_port *sess;
@@ -1451,8 +1448,8 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1451 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 1448 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1452 1449
1453 sess->local = 1; 1450 sess->local = 1;
1454 qlt_schedule_sess_for_deletion(sess);
1455 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1451 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1452 qlt_schedule_sess_for_deletion(sess);
1456} 1453}
1457 1454
1458static inline int test_tgt_sess_count(struct qla_tgt *tgt) 1455static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1512,10 +1509,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
1512 * Lock is needed, because we still can get an incoming packet. 1509 * Lock is needed, because we still can get an incoming packet.
1513 */ 1510 */
1514 mutex_lock(&vha->vha_tgt.tgt_mutex); 1511 mutex_lock(&vha->vha_tgt.tgt_mutex);
1515 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1516 tgt->tgt_stop = 1; 1512 tgt->tgt_stop = 1;
1517 qlt_clear_tgt_db(tgt); 1513 qlt_clear_tgt_db(tgt);
1518 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1519 mutex_unlock(&vha->vha_tgt.tgt_mutex); 1514 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1520 mutex_unlock(&qla_tgt_mutex); 1515 mutex_unlock(&qla_tgt_mutex);
1521 1516
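
The qla_target.c side of that locking change: qlt_schedule_sess_for_deletion() no longer expects sess_lock held on entry but takes ha->tgt.sess_lock itself around the deleted-state transition, so every caller, including qlt_clear_tgt_db() and qlt_fc_port_deleted(), must now call it unlocked. With the check-and-set made atomic inside the function, the cancel_work_sync()-then-requeue dance becomes unnecessary, and a failed queue_work() would indicate a genuine double schedule, hence the WARN_ON. The shape of it, with demo_* names standing in:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

enum demo_state { DEMO_LIVE, DEMO_DELETING };

struct demo_sess {
	spinlock_t lock;		/* stands in for ha->tgt.sess_lock */
	enum demo_state state;
	struct work_struct del_work;
};

/* Callers must NOT hold sess->lock: the state transition is made
 * atomic here rather than relying on the caller's locking. */
static void demo_schedule_delete(struct demo_sess *sess,
				 struct workqueue_struct *wq)
{
	unsigned long flags;

	spin_lock_irqsave(&sess->lock, flags);
	if (sess->state == DEMO_DELETING) {
		spin_unlock_irqrestore(&sess->lock, flags);
		return;			/* already on its way out */
	}
	sess->state = DEMO_DELETING;
	spin_unlock_irqrestore(&sess->lock, flags);

	/* The flag above guarantees one shot, so a failed queue_work()
	 * would be a logic error worth warning about. */
	WARN_ON(!queue_work(wq, &sess->del_work));
}
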
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d042915ce895..ca53a5f785ee 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -223,7 +223,8 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
223 223
224static void scsi_eh_inc_host_failed(struct rcu_head *head) 224static void scsi_eh_inc_host_failed(struct rcu_head *head)
225{ 225{
226 struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu); 226 struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
227 struct Scsi_Host *shost = scmd->device->host;
227 unsigned long flags; 228 unsigned long flags;
228 229
229 spin_lock_irqsave(shost->host_lock, flags); 230 spin_lock_irqsave(shost->host_lock, flags);
@@ -259,7 +260,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
259 * Ensure that all tasks observe the host state change before the 260 * Ensure that all tasks observe the host state change before the
260 * host_failed change. 261 * host_failed change.
261 */ 262 */
262 call_rcu(&shost->rcu, scsi_eh_inc_host_failed); 263 call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
263} 264}
264 265
265/** 266/**
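
This scsi_error.c hunk pairs with the scsi_lib.c one below and is the heart of the fix: host_failed accounting is deferred through call_rcu(), and the rcu_head it uses moves from the shared Scsi_Host into each scsi_cmnd. With a single head embedded in the host, two commands entering error handling within one grace period would pass the same rcu_head to call_rcu() twice, corrupting the RCU callback list; a per-object head makes that impossible. The general pattern, with demo_* as illustrative names:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct demo_cmd {
	struct rcu_head rcu;	/* one head per object, never shared */
	int result;
};

static void demo_after_grace(struct rcu_head *head)
{
	struct demo_cmd *cmd = container_of(head, struct demo_cmd, rcu);

	/* Runs a grace period after demo_fail(); cmd is still valid. */
	pr_debug("cmd %p failed with %d\n", cmd, cmd->result);
}

static void demo_fail(struct demo_cmd *cmd, int result)
{
	cmd->result = result;
	/* Posting a given rcu_head twice before its callback has run
	 * is a bug; an embedded per-command head rules that out. */
	call_rcu(&cmd->rcu, demo_after_grace);
}
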
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a86df9ca7d1c..c84f931388f2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -671,6 +671,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
671 if (!blk_rq_is_scsi(req)) { 671 if (!blk_rq_is_scsi(req)) {
672 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); 672 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
673 cmd->flags &= ~SCMD_INITIALIZED; 673 cmd->flags &= ~SCMD_INITIALIZED;
674 destroy_rcu_head(&cmd->rcu);
674 } 675 }
675 676
676 if (req->mq_ctx) { 677 if (req->mq_ctx) {
@@ -720,6 +721,8 @@ static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
720 int result) 721 int result)
721{ 722{
722 switch (host_byte(result)) { 723 switch (host_byte(result)) {
724 case DID_OK:
725 return BLK_STS_OK;
723 case DID_TRANSPORT_FAILFAST: 726 case DID_TRANSPORT_FAILFAST:
724 return BLK_STS_TRANSPORT; 727 return BLK_STS_TRANSPORT;
725 case DID_TARGET_FAILURE: 728 case DID_TARGET_FAILURE:
@@ -1151,6 +1154,7 @@ static void scsi_initialize_rq(struct request *rq)
1151 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); 1154 struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
1152 1155
1153 scsi_req_init(&cmd->req); 1156 scsi_req_init(&cmd->req);
1157 init_rcu_head(&cmd->rcu);
1154 cmd->jiffies_at_alloc = jiffies; 1158 cmd->jiffies_at_alloc = jiffies;
1155 cmd->retries = 0; 1159 cmd->retries = 0;
1156} 1160}
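
scsi_lib.c finishes the job: init_rcu_head() when the request is initialized and destroy_rcu_head() when it completes bracket the per-command head, so CONFIG_DEBUG_OBJECTS_RCU_HEAD can catch a head that is freed or reused while still queued. The other hunk makes __scsi_error_from_host_byte() map DID_OK to BLK_STS_OK explicitly, so a result whose host byte reports success no longer falls through to the catch-all error mapping. The lifetime pairing in miniature:

#include <linux/rcupdate.h>

struct demo_cmd {
	struct rcu_head rcu;
};

static void demo_cmd_init(struct demo_cmd *cmd)
{
	/* Pairs with destroy_rcu_head(); with debug objects enabled
	 * this tracks the head across its whole lifetime. */
	init_rcu_head(&cmd->rcu);
}

static void demo_cmd_teardown(struct demo_cmd *cmd)
{
	destroy_rcu_head(&cmd->rcu);
}
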
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bff21e636ddd..3541caf3fceb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2595,6 +2595,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2595 int res; 2595 int res;
2596 struct scsi_device *sdp = sdkp->device; 2596 struct scsi_device *sdp = sdkp->device;
2597 struct scsi_mode_data data; 2597 struct scsi_mode_data data;
2598 int disk_ro = get_disk_ro(sdkp->disk);
2598 int old_wp = sdkp->write_prot; 2599 int old_wp = sdkp->write_prot;
2599 2600
2600 set_disk_ro(sdkp->disk, 0); 2601 set_disk_ro(sdkp->disk, 0);
@@ -2635,7 +2636,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2635 "Test WP failed, assume Write Enabled\n"); 2636 "Test WP failed, assume Write Enabled\n");
2636 } else { 2637 } else {
2637 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 2638 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
2638 set_disk_ro(sdkp->disk, sdkp->write_prot); 2639 set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
2639 if (sdkp->first_scan || old_wp != sdkp->write_prot) { 2640 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2640 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 2641 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2641 sdkp->write_prot ? "on" : "off"); 2642 sdkp->write_prot ? "on" : "off");
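
The sd.c change caches get_disk_ro() before the function temporarily clears it for the mode-sense probe, then ORs the cached value back in, so an administrator's BLKROSET read-only setting survives a revalidate instead of being silently dropped. Roughly, with demo_refresh_wp as a hypothetical helper:

#include <linux/genhd.h>

/* Re-reading write-protect must not clobber a user-set read-only:
 * remember the current state first, then OR it back in. */
static void demo_refresh_wp(struct gendisk *disk, bool device_wp)
{
	int user_ro = get_disk_ro(disk);	/* e.g. set via BLKROSET */

	set_disk_ro(disk, device_wp || user_ro);
}
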
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6c348a211ebb..89cf4498f535 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -403,7 +403,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
403 */ 403 */
404static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) 404static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
405{ 405{
406 u64 zone_blocks; 406 u64 zone_blocks = 0;
407 sector_t block = 0; 407 sector_t block = 0;
408 unsigned char *buf; 408 unsigned char *buf;
409 unsigned char *rec; 409 unsigned char *rec;
@@ -421,10 +421,8 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
421 421
422 /* Do a report zone to get the same field */ 422 /* Do a report zone to get the same field */
423 ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); 423 ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
424 if (ret) { 424 if (ret)
425 zone_blocks = 0; 425 goto out_free;
426 goto out;
427 }
428 426
429 same = buf[4] & 0x0f; 427 same = buf[4] & 0x0f;
430 if (same > 0) { 428 if (same > 0) {
@@ -464,7 +462,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
464 ret = sd_zbc_report_zones(sdkp, buf, 462 ret = sd_zbc_report_zones(sdkp, buf,
465 SD_ZBC_BUF_SIZE, block); 463 SD_ZBC_BUF_SIZE, block);
466 if (ret) 464 if (ret)
467 return ret; 465 goto out_free;
468 } 466 }
469 467
470 } while (block < sdkp->capacity); 468 } while (block < sdkp->capacity);
@@ -472,35 +470,32 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
472 zone_blocks = sdkp->zone_blocks; 470 zone_blocks = sdkp->zone_blocks;
473 471
474out: 472out:
475 kfree(buf);
476
477 if (!zone_blocks) { 473 if (!zone_blocks) {
478 if (sdkp->first_scan) 474 if (sdkp->first_scan)
479 sd_printk(KERN_NOTICE, sdkp, 475 sd_printk(KERN_NOTICE, sdkp,
480 "Devices with non constant zone " 476 "Devices with non constant zone "
481 "size are not supported\n"); 477 "size are not supported\n");
482 return -ENODEV; 478 ret = -ENODEV;
483 } 479 } else if (!is_power_of_2(zone_blocks)) {
484
485 if (!is_power_of_2(zone_blocks)) {
486 if (sdkp->first_scan) 480 if (sdkp->first_scan)
487 sd_printk(KERN_NOTICE, sdkp, 481 sd_printk(KERN_NOTICE, sdkp,
488 "Devices with non power of 2 zone " 482 "Devices with non power of 2 zone "
489 "size are not supported\n"); 483 "size are not supported\n");
490 return -ENODEV; 484 ret = -ENODEV;
491 } 485 } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
492
493 if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
494 if (sdkp->first_scan) 486 if (sdkp->first_scan)
495 sd_printk(KERN_NOTICE, sdkp, 487 sd_printk(KERN_NOTICE, sdkp,
496 "Zone size too large\n"); 488 "Zone size too large\n");
497 return -ENODEV; 489 ret = -ENODEV;
490 } else {
491 sdkp->zone_blocks = zone_blocks;
492 sdkp->zone_shift = ilog2(zone_blocks);
498 } 493 }
499 494
500 sdkp->zone_blocks = zone_blocks; 495out_free:
501 sdkp->zone_shift = ilog2(zone_blocks); 496 kfree(buf);
502 497
503 return 0; 498 return ret;
504} 499}
505 500
506/** 501/**
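
sd_zbc_check_zone_size() is restructured around a single exit: zone_blocks starts at 0, every failed report jumps to out_free, and the whole validation chain funnels into one kfree() and one return ret. This closes the leak where the mid-loop "return ret" skipped kfree(buf) entirely. The idiom in miniature, with demo_* helpers as placeholders for the report/parse steps:

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>

static int demo_report_zones(void *buf);	/* hypothetical helper */
static u64 demo_parse_zone_size(void *buf);	/* hypothetical helper */

static int demo_check_zones(void)
{
	u64 zone_blocks = 0;	/* 0 until proven otherwise */
	void *buf;
	int ret;

	buf = kmalloc(4096, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = demo_report_zones(buf);
	if (ret)
		goto out_free;	/* never an early return that skips kfree */

	zone_blocks = demo_parse_zone_size(buf);
	if (!zone_blocks || !is_power_of_2(zone_blocks))
		ret = -ENODEV;

out_free:
	kfree(buf);
	return ret;
}
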
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 6be5ab32c94f..8c51d628b52e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1311,7 +1311,8 @@ static int storvsc_do_io(struct hv_device *device,
1311 */ 1311 */
1312 cpumask_and(&alloced_mask, &stor_device->alloced_cpus, 1312 cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
1313 cpumask_of_node(cpu_to_node(q_num))); 1313 cpumask_of_node(cpu_to_node(q_num)));
1314 for_each_cpu(tgt_cpu, &alloced_mask) { 1314 for_each_cpu_wrap(tgt_cpu, &alloced_mask,
1315 outgoing_channel->target_cpu + 1) {
1315 if (tgt_cpu != outgoing_channel->target_cpu) { 1316 if (tgt_cpu != outgoing_channel->target_cpu) {
1316 outgoing_channel = 1317 outgoing_channel =
1317 stor_device->stor_chns[tgt_cpu]; 1318 stor_device->stor_chns[tgt_cpu];
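
The storvsc change swaps for_each_cpu() for for_each_cpu_wrap(): the old loop always scanned the mask from its lowest CPU, so the "alternate channel" pick landed on the same channel for every caller; starting the scan just past the current channel's target CPU and wrapping around spreads outgoing I/O across the candidate channels. A sketch of the selection:

#include <linux/cpumask.h>

/* Pick an alternate CPU from @mask, starting just past @cur so the
 * choice rotates instead of always hitting the first mask bit. */
static int demo_pick_channel_cpu(const struct cpumask *mask, int cur)
{
	int cpu;

	for_each_cpu_wrap(cpu, mask, cur + 1) {
		if (cpu != cur)
			return cpu;
	}
	return cur;	/* mask contained only @cur */
}
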
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index e4f5bb056fd2..ba3cfa8e279b 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2443,39 +2443,21 @@ struct cgr_comp {
2443 struct completion completion; 2443 struct completion completion;
2444}; 2444};
2445 2445
2446static int qman_delete_cgr_thread(void *p) 2446static void qman_delete_cgr_smp_call(void *p)
2447{ 2447{
2448 struct cgr_comp *cgr_comp = (struct cgr_comp *)p; 2448 qman_delete_cgr((struct qman_cgr *)p);
2449 int ret;
2450
2451 ret = qman_delete_cgr(cgr_comp->cgr);
2452 complete(&cgr_comp->completion);
2453
2454 return ret;
2455} 2449}
2456 2450
2457void qman_delete_cgr_safe(struct qman_cgr *cgr) 2451void qman_delete_cgr_safe(struct qman_cgr *cgr)
2458{ 2452{
2459 struct task_struct *thread;
2460 struct cgr_comp cgr_comp;
2461
2462 preempt_disable(); 2453 preempt_disable();
2463 if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { 2454 if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
2464 init_completion(&cgr_comp.completion); 2455 smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
2465 cgr_comp.cgr = cgr; 2456 qman_delete_cgr_smp_call, cgr, true);
2466 thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
2467 "cgr_del");
2468
2469 if (IS_ERR(thread))
2470 goto out;
2471
2472 kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
2473 wake_up_process(thread);
2474 wait_for_completion(&cgr_comp.completion);
2475 preempt_enable(); 2457 preempt_enable();
2476 return; 2458 return;
2477 } 2459 }
2478out: 2460
2479 qman_delete_cgr(cgr); 2461 qman_delete_cgr(cgr);
2480 preempt_enable(); 2462 preempt_enable();
2481} 2463}
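
qman_delete_cgr_safe() needs qman_delete_cgr() to run on the CPU that owns the CGR's portal. The old code created, bound and woke a kthread per call to get there, and did so with preemption disabled, i.e. it could sleep in atomic context. smp_call_function_single() with wait=true runs the function on the target CPU synchronously and replaces the completion plumbing outright. The pattern, with demo_* stand-ins:

#include <linux/preempt.h>
#include <linux/smp.h>

static void demo_delete(void *obj);	/* the per-CPU-bound operation */

static void demo_do_delete(void *arg)
{
	demo_delete(arg);	/* runs on the target CPU, in IPI context */
}

static void demo_delete_safe(void *obj, int owner_cpu)
{
	preempt_disable();	/* pin ourselves while comparing CPUs */
	if (owner_cpu != smp_processor_id())
		/* wait=true: returns only after the handler has run */
		smp_call_function_single(owner_cpu, demo_do_delete,
					 obj, true);
	else
		demo_do_delete(obj);
	preempt_enable();
}
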
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 6dbba5aff191..86580b6df33d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -326,24 +326,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
326 mutex_lock(&ashmem_mutex); 326 mutex_lock(&ashmem_mutex);
327 327
328 if (asma->size == 0) { 328 if (asma->size == 0) {
329 ret = -EINVAL; 329 mutex_unlock(&ashmem_mutex);
330 goto out; 330 return -EINVAL;
331 } 331 }
332 332
333 if (!asma->file) { 333 if (!asma->file) {
334 ret = -EBADF; 334 mutex_unlock(&ashmem_mutex);
335 goto out; 335 return -EBADF;
336 } 336 }
337 337
338 mutex_unlock(&ashmem_mutex);
339
338 ret = vfs_llseek(asma->file, offset, origin); 340 ret = vfs_llseek(asma->file, offset, origin);
339 if (ret < 0) 341 if (ret < 0)
340 goto out; 342 return ret;
341 343
342 /** Copy f_pos from backing file, since f_ops->llseek() sets it */ 344 /** Copy f_pos from backing file, since f_ops->llseek() sets it */
343 file->f_pos = asma->file->f_pos; 345 file->f_pos = asma->file->f_pos;
344
345out:
346 mutex_unlock(&ashmem_mutex);
347 return ret; 346 return ret;
348} 347}
349 348
@@ -702,16 +701,14 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
702 size_t pgstart, pgend; 701 size_t pgstart, pgend;
703 int ret = -EINVAL; 702 int ret = -EINVAL;
704 703
704 if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
705 return -EFAULT;
706
705 mutex_lock(&ashmem_mutex); 707 mutex_lock(&ashmem_mutex);
706 708
707 if (unlikely(!asma->file)) 709 if (unlikely(!asma->file))
708 goto out_unlock; 710 goto out_unlock;
709 711
710 if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
711 ret = -EFAULT;
712 goto out_unlock;
713 }
714
715 /* per custom, you can pass zero for len to mean "everything onward" */ 712 /* per custom, you can pass zero for len to mean "everything onward" */
716 if (!pin.len) 713 if (!pin.len)
717 pin.len = PAGE_ALIGN(asma->size) - pin.offset; 714 pin.len = PAGE_ALIGN(asma->size) - pin.offset;
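
Both ashmem hunks shrink what happens under ashmem_mutex. ashmem_llseek() now drops the mutex before calling vfs_llseek() on the backing file rather than holding it across the call, and ashmem_pin_unpin() does its copy_from_user() before taking the mutex at all: faulting on the user buffer while holding the mutex can recurse into ashmem's own mmap path and deadlock on the same lock. The safe ordering, sketched with demo names:

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(demo_mutex);

struct demo_pin {
	u32 offset;
	u32 len;
};

static long demo_pin_ioctl(void __user *p)
{
	struct demo_pin pin;
	long ret = -EINVAL;

	/* Fault the user pages in *before* taking the lock: the fault
	 * path may re-enter this driver (mmap) and take it too. */
	if (copy_from_user(&pin, p, sizeof(pin)))
		return -EFAULT;

	mutex_lock(&demo_mutex);
	/* ... validate and apply the pin under the lock, touching no
	 * userspace memory from here on ... */
	mutex_unlock(&demo_mutex);
	return ret;
}
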
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e618a87521a3..9d733471ca2e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -475,8 +475,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
475 struct comedi_cmd *cmd = &async->cmd; 475 struct comedi_cmd *cmd = &async->cmd;
476 476
477 if (cmd->stop_src == TRIG_COUNT) { 477 if (cmd->stop_src == TRIG_COUNT) {
478 unsigned int nscans = nsamples / cmd->scan_end_arg; 478 unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg);
479 unsigned int scans_left = __comedi_nscans_left(s, nscans);
480 unsigned int scan_pos = 479 unsigned int scan_pos =
481 comedi_bytes_to_samples(s, async->scan_progress); 480 comedi_bytes_to_samples(s, async->scan_progress);
482 unsigned long long samples_left = 0; 481 unsigned long long samples_left = 0;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5c0e59e8fe46..cbe98bc2b998 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2180,6 +2180,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2180 } 2180 }
2181 if (tty_hung_up_p(file)) 2181 if (tty_hung_up_p(file))
2182 break; 2182 break;
2183 /*
2184 * Abort readers for ttys which never actually
2185 * get hung up. See __tty_hangup().
2186 */
2187 if (test_bit(TTY_HUPPING, &tty->flags))
2188 break;
2183 if (!timeout) 2189 if (!timeout)
2184 break; 2190 break;
2185 if (file->f_flags & O_NONBLOCK) { 2191 if (file->f_flags & O_NONBLOCK) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 54adf8d56350..a93f77ab3da0 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3387,11 +3387,9 @@ static int serial_pci_is_class_communication(struct pci_dev *dev)
3387 /* 3387 /*
3388 * If it is not a communications device or the programming 3388 * If it is not a communications device or the programming
3389 * interface is greater than 6, give up. 3389 * interface is greater than 6, give up.
3390 *
3391 * (Should we try to make guesses for multiport serial devices
3392 * later?)
3393 */ 3390 */
3394 if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && 3391 if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) &&
3392 ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MULTISERIAL) &&
3395 ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || 3393 ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) ||
3396 (dev->class & 0xff) > 6) 3394 (dev->class & 0xff) > 6)
3397 return -ENODEV; 3395 return -ENODEV;
@@ -3428,6 +3426,12 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
3428{ 3426{
3429 int num_iomem, num_port, first_port = -1, i; 3427 int num_iomem, num_port, first_port = -1, i;
3430 3428
3429 /*
3430 * Should we try to make guesses for multiport serial devices later?
3431 */
3432 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_MULTISERIAL)
3433 return -ENODEV;
3434
3431 num_iomem = num_port = 0; 3435 num_iomem = num_port = 0;
3432 for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { 3436 for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
3433 if (pci_resource_flags(dev, i) & IORESOURCE_IO) { 3437 if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
@@ -4699,6 +4703,17 @@ static const struct pci_device_id serial_pci_tbl[] = {
4699 PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ 4703 PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
4700 pbn_b2_4_115200 }, 4704 pbn_b2_4_115200 },
4701 /* 4705 /*
4706 * BrainBoxes UC-260
4707 */
4708 { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
4709 PCI_ANY_ID, PCI_ANY_ID,
4710 PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
4711 pbn_b2_4_115200 },
4712 { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
4713 PCI_ANY_ID, PCI_ANY_ID,
4714 PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
4715 pbn_b2_4_115200 },
4716 /*
4702 * Perle PCI-RAS cards 4717 * Perle PCI-RAS cards
4703 */ 4718 */
4704 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, 4719 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
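
The 8250_pci change moves the MULTISERIAL class test out of the generic "is this a serial device" check and into serial_pci_guess_board(), so the guessing fallback still refuses unknown multiport boards while explicit quirk-table entries can now claim them. The new BrainBoxes UC-260 entries do exactly that by matching on PCI class: PCI_CLASS_COMMUNICATION_MULTISERIAL << 8 lines the base class and subclass up with dev->class, and the 0xffff00 mask ignores the programming-interface byte. The idiom in isolation, mirroring one of the entries above:

#include <linux/pci.h>
#include <linux/pci_ids.h>

/* Vendor/device match constrained by PCI class: only functions whose
 * base class + subclass say "multiport serial" are claimed. */
static const struct pci_device_id demo_tbl[] = {
	{ PCI_VENDOR_ID_INTASHIELD, 0x0d21, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, 0 },
	{ }	/* terminator */
};
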
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index df46a9e88c34..e287fe8f10fc 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1734,6 +1734,7 @@ static void atmel_get_ip_name(struct uart_port *port)
1734 switch (version) { 1734 switch (version) {
1735 case 0x302: 1735 case 0x302:
1736 case 0x10213: 1736 case 0x10213:
1737 case 0x10302:
1737 dev_dbg(port->dev, "This version is usart\n"); 1738 dev_dbg(port->dev, "This version is usart\n");
1738 atmel_port->has_frac_baudrate = true; 1739 atmel_port->has_frac_baudrate = true;
1739 atmel_port->has_hw_timer = true; 1740 atmel_port->has_hw_timer = true;
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 870e84fb6e39..a24278380fec 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -245,11 +245,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
245 } 245 }
246 port->mapbase = addr; 246 port->mapbase = addr;
247 port->uartclk = BASE_BAUD * 16; 247 port->uartclk = BASE_BAUD * 16;
248 port->membase = earlycon_map(port->mapbase, SZ_4K);
249 248
250 val = of_get_flat_dt_prop(node, "reg-offset", NULL); 249 val = of_get_flat_dt_prop(node, "reg-offset", NULL);
251 if (val) 250 if (val)
252 port->mapbase += be32_to_cpu(*val); 251 port->mapbase += be32_to_cpu(*val);
252 port->membase = earlycon_map(port->mapbase, SZ_4K);
253
253 val = of_get_flat_dt_prop(node, "reg-shift", NULL); 254 val = of_get_flat_dt_prop(node, "reg-shift", NULL);
254 if (val) 255 if (val)
255 port->regshift = be32_to_cpu(*val); 256 port->regshift = be32_to_cpu(*val);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 1d7ca382bc12..a33c685af990 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2093,7 +2093,7 @@ static int serial_imx_probe(struct platform_device *pdev)
2093 uart_get_rs485_mode(&pdev->dev, &sport->port.rs485); 2093 uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);
2094 2094
2095 if (sport->port.rs485.flags & SER_RS485_ENABLED && 2095 if (sport->port.rs485.flags & SER_RS485_ENABLED &&
2096 (!sport->have_rtscts || !sport->have_rtsgpio)) 2096 (!sport->have_rtscts && !sport->have_rtsgpio))
2097 dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); 2097 dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
2098 2098
2099 imx_rs485_config(&sport->port, &sport->port.rs485); 2099 imx_rs485_config(&sport->port, &sport->port.rs485);
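
The imx change is a one-character logic fix: RS485 direction control needs either native RTS/CTS or an RTS GPIO, not both, so the "no RTS control, disabling rs485" warning should fire only when both are missing. The old || triggered it whenever either one was absent. Equivalently:

#include <linux/types.h>

/* RS485 needs one of the two RTS mechanisms; only missing both is
 * a problem. (The old test used ||, firing if either was absent.) */
static bool demo_rs485_unusable(bool have_rtscts, bool have_rtsgpio)
{
	return !have_rtscts && !have_rtsgpio;
}
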
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index c8dde56b532b..35b9201db3b4 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1144,6 +1144,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
1144 uport->ops->config_port(uport, flags); 1144 uport->ops->config_port(uport, flags);
1145 1145
1146 ret = uart_startup(tty, state, 1); 1146 ret = uart_startup(tty, state, 1);
1147 if (ret == 0)
1148 tty_port_set_initialized(port, true);
1147 if (ret > 0) 1149 if (ret > 0)
1148 ret = 0; 1150 ret = 0;
1149 } 1151 }
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7257c078e155..44adf9db38f8 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -885,6 +885,8 @@ static void sci_receive_chars(struct uart_port *port)
885 /* Tell the rest of the system the news. New characters! */ 885 /* Tell the rest of the system the news. New characters! */
886 tty_flip_buffer_push(tport); 886 tty_flip_buffer_push(tport);
887 } else { 887 } else {
888 /* TTY buffers full; read from RX reg to prevent lockup */
889 serial_port_in(port, SCxRDR);
888 serial_port_in(port, SCxSR); /* dummy read */ 890 serial_port_in(port, SCxSR); /* dummy read */
889 sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); 891 sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
890 } 892 }
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index eb9133b472f4..63114ea35ec1 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -586,6 +586,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
586 return; 586 return;
587 } 587 }
588 588
589 /*
590 * Some console devices aren't actually hung up for technical and
591 * historical reasons, which can lead to indefinite interruptible
592 * sleep in n_tty_read(). The following explicitly tells
593 * n_tty_read() to abort readers.
594 */
595 set_bit(TTY_HUPPING, &tty->flags);
596
589 /* inuse_filps is protected by the single tty lock, 597 /* inuse_filps is protected by the single tty lock,
590 this really needs to change if we want to flush the 598 this really needs to change if we want to flush the
591 workqueue with the lock held */ 599 workqueue with the lock held */
@@ -640,6 +648,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
640 * from the ldisc side, which is now guaranteed. 648 * from the ldisc side, which is now guaranteed.
641 */ 649 */
642 set_bit(TTY_HUPPED, &tty->flags); 650 set_bit(TTY_HUPPED, &tty->flags);
651 clear_bit(TTY_HUPPING, &tty->flags);
643 tty_unlock(tty); 652 tty_unlock(tty);
644 653
645 if (f) 654 if (f)
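
The tty_io.c and n_tty.c hunks are two halves of one fix. Console-like ttys are never fully hung up, so a reader sleeping in n_tty_read() could wait indefinitely across a hangup. __tty_hangup() now raises a transient TTY_HUPPING bit before tearing things down and clears it once TTY_HUPPED is set; n_tty_read() tests the bit each time around its wait loop and bails out. The handshake in outline, with demo names for the bits:

#include <linux/bitops.h>
#include <linux/wait.h>

#define DEMO_HUPPING	0	/* transient: hangup in progress */
#define DEMO_HUPPED	1	/* final: hangup completed */

/* Writer side (the hangup path). */
static void demo_hangup(unsigned long *flags, wait_queue_head_t *readers)
{
	set_bit(DEMO_HUPPING, flags);	/* readers must now bail out */
	wake_up_interruptible(readers);
	/* ... flush and tear down ... */
	set_bit(DEMO_HUPPED, flags);
	clear_bit(DEMO_HUPPING, flags);
}

/* Reader side: checked on every pass through the wait loop. */
static bool demo_should_abort_read(const unsigned long *flags)
{
	return test_bit(DEMO_HUPPING, flags);
}
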
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index c64cf6c4a83d..0c11d40a12bc 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -151,6 +151,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
151 151
152 ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); 152 ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
153 153
154 /* Linger a bit, prior to the next control message. */
155 if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
156 msleep(200);
157
154 kfree(dr); 158 kfree(dr);
155 159
156 return ret; 160 return ret;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f4a548471f0f..54b019e267c5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -230,7 +230,8 @@ static const struct usb_device_id usb_quirk_list[] = {
230 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 230 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
231 231
232 /* Corsair Strafe RGB */ 232 /* Corsair Strafe RGB */
233 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, 233 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
234 USB_QUIRK_DELAY_CTRL_MSG },
234 235
235 /* Corsair K70 LUX */ 236 /* Corsair K70 LUX */
236 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, 237 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 03fd20f0b496..c4a47496d2fb 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -137,7 +137,7 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
137 p->activate_stm_fs_transceiver = true; 137 p->activate_stm_fs_transceiver = true;
138} 138}
139 139
140static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg) 140static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg)
141{ 141{
142 struct dwc2_core_params *p = &hsotg->params; 142 struct dwc2_core_params *p = &hsotg->params;
143 143
@@ -164,8 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = {
164 { .compatible = "st,stm32f4x9-fsotg", 164 { .compatible = "st,stm32f4x9-fsotg",
165 .data = dwc2_set_stm32f4x9_fsotg_params }, 165 .data = dwc2_set_stm32f4x9_fsotg_params },
166 { .compatible = "st,stm32f4x9-hsotg" }, 166 { .compatible = "st,stm32f4x9-hsotg" },
167 { .compatible = "st,stm32f7xx-hsotg", 167 { .compatible = "st,stm32f7-hsotg",
168 .data = dwc2_set_stm32f7xx_hsotg_params }, 168 .data = dwc2_set_stm32f7_hsotg_params },
169 {}, 169 {},
170}; 170};
171MODULE_DEVICE_TABLE(of, dwc2_of_match_table); 171MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f1d838a4acd6..e94bf91cc58a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -175,7 +175,7 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
175 dwc->desired_dr_role = mode; 175 dwc->desired_dr_role = mode;
176 spin_unlock_irqrestore(&dwc->lock, flags); 176 spin_unlock_irqrestore(&dwc->lock, flags);
177 177
178 queue_work(system_power_efficient_wq, &dwc->drd_work); 178 queue_work(system_freezable_wq, &dwc->drd_work);
179} 179}
180 180
181u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) 181u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index c2592d883f67..d2428a9e8900 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1538,7 +1538,6 @@ ffs_fs_kill_sb(struct super_block *sb)
1538 if (sb->s_fs_info) { 1538 if (sb->s_fs_info) {
1539 ffs_release_dev(sb->s_fs_info); 1539 ffs_release_dev(sb->s_fs_info);
1540 ffs_data_closed(sb->s_fs_info); 1540 ffs_data_closed(sb->s_fs_info);
1541 ffs_data_put(sb->s_fs_info);
1542 } 1541 }
1543} 1542}
1544 1543
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 84f88fa411cd..d088c340e4d0 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -447,7 +447,8 @@ static int ohci_init (struct ohci_hcd *ohci)
447 struct usb_hcd *hcd = ohci_to_hcd(ohci); 447 struct usb_hcd *hcd = ohci_to_hcd(ohci);
448 448
449 /* Accept arbitrarily long scatter-gather lists */ 449 /* Accept arbitrarily long scatter-gather lists */
450 hcd->self.sg_tablesize = ~0; 450 if (!(hcd->driver->flags & HCD_LOCAL_MEM))
451 hcd->self.sg_tablesize = ~0;
451 452
452 if (distrust_firmware) 453 if (distrust_firmware)
453 ohci->flags |= OHCI_QUIRK_HUB_POWER; 454 ohci->flags |= OHCI_QUIRK_HUB_POWER;
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index a1ab8acf39ba..c359bae7b754 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -328,13 +328,14 @@ dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
328int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req, 328int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
329 gfp_t gfp_flags) 329 gfp_t gfp_flags)
330{ 330{
331 unsigned long flags;
331 struct xhci_dbc *dbc = dep->dbc; 332 struct xhci_dbc *dbc = dep->dbc;
332 int ret = -ESHUTDOWN; 333 int ret = -ESHUTDOWN;
333 334
334 spin_lock(&dbc->lock); 335 spin_lock_irqsave(&dbc->lock, flags);
335 if (dbc->state == DS_CONFIGURED) 336 if (dbc->state == DS_CONFIGURED)
336 ret = dbc_ep_do_queue(dep, req); 337 ret = dbc_ep_do_queue(dep, req);
337 spin_unlock(&dbc->lock); 338 spin_unlock_irqrestore(&dbc->lock, flags);
338 339
339 mod_delayed_work(system_wq, &dbc->event_work, 0); 340 mod_delayed_work(system_wq, &dbc->event_work, 0);
340 341
@@ -521,15 +522,16 @@ static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
521static int xhci_dbc_start(struct xhci_hcd *xhci) 522static int xhci_dbc_start(struct xhci_hcd *xhci)
522{ 523{
523 int ret; 524 int ret;
525 unsigned long flags;
524 struct xhci_dbc *dbc = xhci->dbc; 526 struct xhci_dbc *dbc = xhci->dbc;
525 527
526 WARN_ON(!dbc); 528 WARN_ON(!dbc);
527 529
528 pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller); 530 pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);
529 531
530 spin_lock(&dbc->lock); 532 spin_lock_irqsave(&dbc->lock, flags);
531 ret = xhci_do_dbc_start(xhci); 533 ret = xhci_do_dbc_start(xhci);
532 spin_unlock(&dbc->lock); 534 spin_unlock_irqrestore(&dbc->lock, flags);
533 535
534 if (ret) { 536 if (ret) {
535 pm_runtime_put(xhci_to_hcd(xhci)->self.controller); 537 pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
@@ -541,6 +543,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
541 543
542static void xhci_dbc_stop(struct xhci_hcd *xhci) 544static void xhci_dbc_stop(struct xhci_hcd *xhci)
543{ 545{
546 unsigned long flags;
544 struct xhci_dbc *dbc = xhci->dbc; 547 struct xhci_dbc *dbc = xhci->dbc;
545 struct dbc_port *port = &dbc->port; 548 struct dbc_port *port = &dbc->port;
546 549
@@ -551,9 +554,9 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
551 if (port->registered) 554 if (port->registered)
552 xhci_dbc_tty_unregister_device(xhci); 555 xhci_dbc_tty_unregister_device(xhci);
553 556
554 spin_lock(&dbc->lock); 557 spin_lock_irqsave(&dbc->lock, flags);
555 xhci_do_dbc_stop(xhci); 558 xhci_do_dbc_stop(xhci);
556 spin_unlock(&dbc->lock); 559 spin_unlock_irqrestore(&dbc->lock, flags);
557 560
558 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); 561 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
559} 562}
@@ -779,14 +782,15 @@ static void xhci_dbc_handle_events(struct work_struct *work)
779 int ret; 782 int ret;
780 enum evtreturn evtr; 783 enum evtreturn evtr;
781 struct xhci_dbc *dbc; 784 struct xhci_dbc *dbc;
785 unsigned long flags;
782 struct xhci_hcd *xhci; 786 struct xhci_hcd *xhci;
783 787
784 dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); 788 dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
785 xhci = dbc->xhci; 789 xhci = dbc->xhci;
786 790
787 spin_lock(&dbc->lock); 791 spin_lock_irqsave(&dbc->lock, flags);
788 evtr = xhci_dbc_do_handle_events(dbc); 792 evtr = xhci_dbc_do_handle_events(dbc);
789 spin_unlock(&dbc->lock); 793 spin_unlock_irqrestore(&dbc->lock, flags);
790 794
791 switch (evtr) { 795 switch (evtr) {
792 case EVT_GSER: 796 case EVT_GSER:
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index 8d47b6fbf973..75f0b92694ba 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -92,21 +92,23 @@ static void dbc_start_rx(struct dbc_port *port)
92static void 92static void
93dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req) 93dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
94{ 94{
95 unsigned long flags;
95 struct xhci_dbc *dbc = xhci->dbc; 96 struct xhci_dbc *dbc = xhci->dbc;
96 struct dbc_port *port = &dbc->port; 97 struct dbc_port *port = &dbc->port;
97 98
98 spin_lock(&port->port_lock); 99 spin_lock_irqsave(&port->port_lock, flags);
99 list_add_tail(&req->list_pool, &port->read_queue); 100 list_add_tail(&req->list_pool, &port->read_queue);
100 tasklet_schedule(&port->push); 101 tasklet_schedule(&port->push);
101 spin_unlock(&port->port_lock); 102 spin_unlock_irqrestore(&port->port_lock, flags);
102} 103}
103 104
104static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req) 105static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
105{ 106{
107 unsigned long flags;
106 struct xhci_dbc *dbc = xhci->dbc; 108 struct xhci_dbc *dbc = xhci->dbc;
107 struct dbc_port *port = &dbc->port; 109 struct dbc_port *port = &dbc->port;
108 110
109 spin_lock(&port->port_lock); 111 spin_lock_irqsave(&port->port_lock, flags);
110 list_add(&req->list_pool, &port->write_pool); 112 list_add(&req->list_pool, &port->write_pool);
111 switch (req->status) { 113 switch (req->status) {
112 case 0: 114 case 0:
@@ -119,7 +121,7 @@ static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
119 req->status); 121 req->status);
120 break; 122 break;
121 } 123 }
122 spin_unlock(&port->port_lock); 124 spin_unlock_irqrestore(&port->port_lock, flags);
123} 125}
124 126
125static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req) 127static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
@@ -327,12 +329,13 @@ static void dbc_rx_push(unsigned long _port)
327{ 329{
328 struct dbc_request *req; 330 struct dbc_request *req;
329 struct tty_struct *tty; 331 struct tty_struct *tty;
332 unsigned long flags;
330 bool do_push = false; 333 bool do_push = false;
331 bool disconnect = false; 334 bool disconnect = false;
332 struct dbc_port *port = (void *)_port; 335 struct dbc_port *port = (void *)_port;
333 struct list_head *queue = &port->read_queue; 336 struct list_head *queue = &port->read_queue;
334 337
335 spin_lock_irq(&port->port_lock); 338 spin_lock_irqsave(&port->port_lock, flags);
336 tty = port->port.tty; 339 tty = port->port.tty;
337 while (!list_empty(queue)) { 340 while (!list_empty(queue)) {
338 req = list_first_entry(queue, struct dbc_request, list_pool); 341 req = list_first_entry(queue, struct dbc_request, list_pool);
@@ -392,16 +395,17 @@ static void dbc_rx_push(unsigned long _port)
392 if (!disconnect) 395 if (!disconnect)
393 dbc_start_rx(port); 396 dbc_start_rx(port);
394 397
395 spin_unlock_irq(&port->port_lock); 398 spin_unlock_irqrestore(&port->port_lock, flags);
396} 399}
397 400
398static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty) 401static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
399{ 402{
403 unsigned long flags;
400 struct dbc_port *port = container_of(_port, struct dbc_port, port); 404 struct dbc_port *port = container_of(_port, struct dbc_port, port);
401 405
402 spin_lock_irq(&port->port_lock); 406 spin_lock_irqsave(&port->port_lock, flags);
403 dbc_start_rx(port); 407 dbc_start_rx(port);
404 spin_unlock_irq(&port->port_lock); 408 spin_unlock_irqrestore(&port->port_lock, flags);
405 409
406 return 0; 410 return 0;
407} 411}
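
All of the xhci-dbgcap/xhci-dbgtty hunks above make the same substitution: plain spin_lock() or spin_lock_irq() becomes spin_lock_irqsave(). The DbC paths can be entered both with interrupts enabled and disabled, and irqsave/irqrestore preserves whatever interrupt state the caller had rather than unconditionally re-enabling interrupts on unlock. A self-contained sketch of the pattern:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static int shared_counter;

    /* Safe from any context: the current IRQ state is saved in
     * 'flags' and exactly that state is restored on unlock, so a
     * caller that already had interrupts off does not get them
     * turned back on behind its back. */
    static void example_update(void)
    {
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        shared_counter++;
        spin_unlock_irqrestore(&example_lock, flags);
    }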
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5262fa571a5d..d9f831b67e57 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -126,6 +126,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
126 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) 126 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
127 xhci->quirks |= XHCI_AMD_PLL_FIX; 127 xhci->quirks |= XHCI_AMD_PLL_FIX;
128 128
129 if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb)
130 xhci->quirks |= XHCI_SUSPEND_DELAY;
131
129 if (pdev->vendor == PCI_VENDOR_ID_AMD) 132 if (pdev->vendor == PCI_VENDOR_ID_AMD)
130 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 133 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
131 134
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6f038306c14d..6652e2d5bd2e 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -360,7 +360,6 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
360{ 360{
361 struct usb_hcd *hcd = dev_get_drvdata(dev); 361 struct usb_hcd *hcd = dev_get_drvdata(dev);
362 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 362 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
363 int ret;
364 363
365 /* 364 /*
366 * xhci_suspend() needs `do_wakeup` to know whether host is allowed 365 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
@@ -370,12 +369,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
370 * reconsider this when xhci_plat_suspend enlarges its scope, e.g., 369 * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
371 * also applies to runtime suspend. 370 * also applies to runtime suspend.
372 */ 371 */
373 ret = xhci_suspend(xhci, device_may_wakeup(dev)); 372 return xhci_suspend(xhci, device_may_wakeup(dev));
374
375 if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
376 clk_disable_unprepare(xhci->clk);
377
378 return ret;
379} 373}
380 374
381static int __maybe_unused xhci_plat_resume(struct device *dev) 375static int __maybe_unused xhci_plat_resume(struct device *dev)
@@ -384,9 +378,6 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
384 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 378 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
385 int ret; 379 int ret;
386 380
387 if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
388 clk_prepare_enable(xhci->clk);
389
390 ret = xhci_priv_resume_quirk(hcd); 381 ret = xhci_priv_resume_quirk(hcd);
391 if (ret) 382 if (ret)
392 return ret; 383 return ret;
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index f0b559660007..f33ffc2bc4ed 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -83,6 +83,10 @@ static const struct soc_device_attribute rcar_quirks_match[] = {
83 .soc_id = "r8a7796", 83 .soc_id = "r8a7796",
84 .data = (void *)RCAR_XHCI_FIRMWARE_V3, 84 .data = (void *)RCAR_XHCI_FIRMWARE_V3,
85 }, 85 },
86 {
87 .soc_id = "r8a77965",
88 .data = (void *)RCAR_XHCI_FIRMWARE_V3,
89 },
86 { /* sentinel */ }, 90 { /* sentinel */ },
87}; 91};
88 92
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 25d4b748a56f..5d37700ae4b0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -877,6 +877,9 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
877 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 877 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
878 del_timer_sync(&xhci->shared_hcd->rh_timer); 878 del_timer_sync(&xhci->shared_hcd->rh_timer);
879 879
880 if (xhci->quirks & XHCI_SUSPEND_DELAY)
881 usleep_range(1000, 1500);
882
880 spin_lock_irq(&xhci->lock); 883 spin_lock_irq(&xhci->lock);
881 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 884 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
882 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); 885 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
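
The 1-1.5 ms settle delay above uses usleep_range() rather than msleep(). For single-digit-millisecond waits msleep(1) can round up to a whole jiffy (10-20 ms depending on HZ), while usleep_range() is hrtimer-based and keeps the delay tight with a small window for timer coalescing. A sketch of the gated delay, with the quirk bit written out (1 << 30 matches the XHCI_SUSPEND_DELAY definition added in the xhci.h hunk below):

    #include <linux/delay.h>

    /* Hedged sketch: quirk-gated settle delay before the host is
     * stopped. usleep_range() gives a bounded 1.0-1.5 ms sleep,
     * where msleep(1) could legally take a full jiffy. */
    static void example_pre_suspend_delay(unsigned int quirks)
    {
        if (quirks & (1 << 30))     /* XHCI_SUSPEND_DELAY, per this patch */
            usleep_range(1000, 1500);
    }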
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e4d7d3d06a75..866e141d4972 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -718,11 +718,12 @@ struct xhci_ep_ctx {
718/* bits 10:14 are Max Primary Streams */ 718/* bits 10:14 are Max Primary Streams */
719/* bit 15 is Linear Stream Array */ 719/* bit 15 is Linear Stream Array */
720/* Interval - period between requests to an endpoint - 125u increments. */ 720/* Interval - period between requests to an endpoint - 125u increments. */
721#define EP_INTERVAL(p) (((p) & 0xff) << 16) 721#define EP_INTERVAL(p) (((p) & 0xff) << 16)
722#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) 722#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
723#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) 723#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff)
724#define EP_MAXPSTREAMS_MASK (0x1f << 10) 724#define EP_MAXPSTREAMS_MASK (0x1f << 10)
725#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) 725#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
726#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10)
726/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ 727/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
727#define EP_HAS_LSA (1 << 15) 728#define EP_HAS_LSA (1 << 15)
728/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ 729/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
@@ -1825,6 +1826,7 @@ struct xhci_hcd {
1825#define XHCI_U2_DISABLE_WAKE (1 << 27) 1826#define XHCI_U2_DISABLE_WAKE (1 << 27)
1826#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) 1827#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
1827#define XHCI_HW_LPM_DISABLE (1 << 29) 1828#define XHCI_HW_LPM_DISABLE (1 << 29)
1829#define XHCI_SUSPEND_DELAY (1 << 30)
1828 1830
1829 unsigned int num_active_eps; 1831 unsigned int num_active_eps;
1830 unsigned int limit_active_eps; 1832 unsigned int limit_active_eps;
@@ -2549,21 +2551,22 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
2549 u8 burst; 2551 u8 burst;
2550 u8 cerr; 2552 u8 cerr;
2551 u8 mult; 2553 u8 mult;
2552 u8 lsa; 2554
2553 u8 hid; 2555 bool lsa;
2556 bool hid;
2554 2557
2555 esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | 2558 esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
2556 CTX_TO_MAX_ESIT_PAYLOAD(tx_info); 2559 CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
2557 2560
2558 ep_state = info & EP_STATE_MASK; 2561 ep_state = info & EP_STATE_MASK;
2559 max_pstr = info & EP_MAXPSTREAMS_MASK; 2562 max_pstr = CTX_TO_EP_MAXPSTREAMS(info);
2560 interval = CTX_TO_EP_INTERVAL(info); 2563 interval = CTX_TO_EP_INTERVAL(info);
2561 mult = CTX_TO_EP_MULT(info) + 1; 2564 mult = CTX_TO_EP_MULT(info) + 1;
2562 lsa = info & EP_HAS_LSA; 2565 lsa = !!(info & EP_HAS_LSA);
2563 2566
2564 cerr = (info2 & (3 << 1)) >> 1; 2567 cerr = (info2 & (3 << 1)) >> 1;
2565 ep_type = CTX_TO_EP_TYPE(info2); 2568 ep_type = CTX_TO_EP_TYPE(info2);
2566 hid = info2 & (1 << 7); 2569 hid = !!(info2 & (1 << 7));
2567 burst = CTX_TO_MAX_BURST(info2); 2570 burst = CTX_TO_MAX_BURST(info2);
2568 maxp = MAX_PACKET_DECODED(info2); 2571 maxp = MAX_PACKET_DECODED(info2);
2569 2572
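
The decode fix above is easy to miss: max_pstr was previously assigned info & EP_MAXPSTREAMS_MASK, which leaves the field still shifted left by 10 bits, so the trace output reported e.g. 4096 instead of 4. The new CTX_TO_EP_MAXPSTREAMS macro shifts the field back down. A small userspace demonstration of the two decodes:

    #include <stdio.h>

    #define EP_MAXPSTREAMS_MASK         (0x1f << 10)
    #define EP_MAXPSTREAMS(p)           (((p) << 10) & EP_MAXPSTREAMS_MASK)
    #define CTX_TO_EP_MAXPSTREAMS(p)    (((p) & EP_MAXPSTREAMS_MASK) >> 10)

    int main(void)
    {
        unsigned int info = EP_MAXPSTREAMS(4);  /* field value 4 */

        /* Old decode: masked but not shifted -> prints 4096, wrong. */
        printf("raw masked: %u\n", info & EP_MAXPSTREAMS_MASK);
        /* New decode: shifted back down -> prints 4, correct. */
        printf("decoded:    %u\n", CTX_TO_EP_MAXPSTREAMS(info));
        return 0;
    }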
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index f5e1bb5e5217..984f7e12a6a5 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -85,6 +85,8 @@ struct mon_reader_text {
85 85
86 wait_queue_head_t wait; 86 wait_queue_head_t wait;
87 int printf_size; 87 int printf_size;
88 size_t printf_offset;
89 size_t printf_togo;
88 char *printf_buf; 90 char *printf_buf;
89 struct mutex printf_lock; 91 struct mutex printf_lock;
90 92
@@ -376,75 +378,103 @@ err_alloc:
376 return rc; 378 return rc;
377} 379}
378 380
379/* 381static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
380 * For simplicity, we read one record in one system call and throw out 382 char __user * const buf, const size_t nbytes)
381 * what does not fit. This means that the following does not work: 383{
382 * dd if=/dbg/usbmon/0t bs=10 384 const size_t togo = min(nbytes, rp->printf_togo);
383 * Also, we do not allow seeks and do not bother advancing the offset. 385
384 */ 386 if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
387 return -EFAULT;
388 rp->printf_togo -= togo;
389 rp->printf_offset += togo;
390 return togo;
391}
392
393/* ppos is not advanced since the llseek operation is not permitted. */
385static ssize_t mon_text_read_t(struct file *file, char __user *buf, 394static ssize_t mon_text_read_t(struct file *file, char __user *buf,
386 size_t nbytes, loff_t *ppos) 395 size_t nbytes, loff_t *ppos)
387{ 396{
388 struct mon_reader_text *rp = file->private_data; 397 struct mon_reader_text *rp = file->private_data;
389 struct mon_event_text *ep; 398 struct mon_event_text *ep;
390 struct mon_text_ptr ptr; 399 struct mon_text_ptr ptr;
400 ssize_t ret;
391 401
392 ep = mon_text_read_wait(rp, file);
393 if (IS_ERR(ep))
394 return PTR_ERR(ep);
395 mutex_lock(&rp->printf_lock); 402 mutex_lock(&rp->printf_lock);
396 ptr.cnt = 0; 403
397 ptr.pbuf = rp->printf_buf; 404 if (rp->printf_togo == 0) {
398 ptr.limit = rp->printf_size; 405
399 406 ep = mon_text_read_wait(rp, file);
400 mon_text_read_head_t(rp, &ptr, ep); 407 if (IS_ERR(ep)) {
401 mon_text_read_statset(rp, &ptr, ep); 408 mutex_unlock(&rp->printf_lock);
402 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, 409 return PTR_ERR(ep);
403 " %d", ep->length); 410 }
404 mon_text_read_data(rp, &ptr, ep); 411 ptr.cnt = 0;
405 412 ptr.pbuf = rp->printf_buf;
406 if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) 413 ptr.limit = rp->printf_size;
407 ptr.cnt = -EFAULT; 414
415 mon_text_read_head_t(rp, &ptr, ep);
416 mon_text_read_statset(rp, &ptr, ep);
417 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
418 " %d", ep->length);
419 mon_text_read_data(rp, &ptr, ep);
420
421 rp->printf_togo = ptr.cnt;
422 rp->printf_offset = 0;
423
424 kmem_cache_free(rp->e_slab, ep);
425 }
426
427 ret = mon_text_copy_to_user(rp, buf, nbytes);
408 mutex_unlock(&rp->printf_lock); 428 mutex_unlock(&rp->printf_lock);
409 kmem_cache_free(rp->e_slab, ep); 429 return ret;
410 return ptr.cnt;
411} 430}
412 431
432/* ppos is not advanced since the llseek operation is not permitted. */
413static ssize_t mon_text_read_u(struct file *file, char __user *buf, 433static ssize_t mon_text_read_u(struct file *file, char __user *buf,
414 size_t nbytes, loff_t *ppos) 434 size_t nbytes, loff_t *ppos)
415{ 435{
416 struct mon_reader_text *rp = file->private_data; 436 struct mon_reader_text *rp = file->private_data;
417 struct mon_event_text *ep; 437 struct mon_event_text *ep;
418 struct mon_text_ptr ptr; 438 struct mon_text_ptr ptr;
439 ssize_t ret;
419 440
420 ep = mon_text_read_wait(rp, file);
421 if (IS_ERR(ep))
422 return PTR_ERR(ep);
423 mutex_lock(&rp->printf_lock); 441 mutex_lock(&rp->printf_lock);
424 ptr.cnt = 0;
425 ptr.pbuf = rp->printf_buf;
426 ptr.limit = rp->printf_size;
427 442
428 mon_text_read_head_u(rp, &ptr, ep); 443 if (rp->printf_togo == 0) {
429 if (ep->type == 'E') { 444
430 mon_text_read_statset(rp, &ptr, ep); 445 ep = mon_text_read_wait(rp, file);
431 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { 446 if (IS_ERR(ep)) {
432 mon_text_read_isostat(rp, &ptr, ep); 447 mutex_unlock(&rp->printf_lock);
433 mon_text_read_isodesc(rp, &ptr, ep); 448 return PTR_ERR(ep);
434 } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { 449 }
435 mon_text_read_intstat(rp, &ptr, ep); 450 ptr.cnt = 0;
436 } else { 451 ptr.pbuf = rp->printf_buf;
437 mon_text_read_statset(rp, &ptr, ep); 452 ptr.limit = rp->printf_size;
453
454 mon_text_read_head_u(rp, &ptr, ep);
455 if (ep->type == 'E') {
456 mon_text_read_statset(rp, &ptr, ep);
457 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
458 mon_text_read_isostat(rp, &ptr, ep);
459 mon_text_read_isodesc(rp, &ptr, ep);
460 } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
461 mon_text_read_intstat(rp, &ptr, ep);
462 } else {
463 mon_text_read_statset(rp, &ptr, ep);
464 }
465 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
466 " %d", ep->length);
467 mon_text_read_data(rp, &ptr, ep);
468
469 rp->printf_togo = ptr.cnt;
470 rp->printf_offset = 0;
471
472 kmem_cache_free(rp->e_slab, ep);
438 } 473 }
439 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
440 " %d", ep->length);
441 mon_text_read_data(rp, &ptr, ep);
442 474
443 if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) 475 ret = mon_text_copy_to_user(rp, buf, nbytes);
444 ptr.cnt = -EFAULT;
445 mutex_unlock(&rp->printf_lock); 476 mutex_unlock(&rp->printf_lock);
446 kmem_cache_free(rp->e_slab, ep); 477 return ret;
447 return ptr.cnt;
448} 478}
449 479
450static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, 480static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
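
The mon_text rework replaces "one record per read(), excess silently dropped" with the usual buffered-read pattern: format a record into printf_buf once, then dole it out across as many read() calls as needed, tracked by printf_offset and printf_togo. This is what makes short reads such as "dd bs=10" work. A plain-C analogue of the copy-out step (the kernel version uses copy_to_user() and returns -EFAULT on failure):

    #include <stddef.h>
    #include <string.h>

    struct text_reader {
        char buf[4096];
        size_t offset;  /* next byte to hand out */
        size_t togo;    /* bytes still buffered */
    };

    /* Hedged sketch of the mon_text_copy_to_user() logic: copy at
     * most 'nbytes' of the buffered record and advance the cursor,
     * so the remainder survives for the next read() call. */
    static size_t reader_copy(struct text_reader *rp, char *dst, size_t nbytes)
    {
        size_t togo = nbytes < rp->togo ? nbytes : rp->togo;

        memcpy(dst, &rp->buf[rp->offset], togo);
        rp->togo -= togo;
        rp->offset += togo;
        return togo;
    }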
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index eef4ad578b31..4d723077be2b 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1756,6 +1756,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1756 int vbus; 1756 int vbus;
1757 u8 devctl; 1757 u8 devctl;
1758 1758
1759 pm_runtime_get_sync(dev);
1759 spin_lock_irqsave(&musb->lock, flags); 1760 spin_lock_irqsave(&musb->lock, flags);
1760 val = musb->a_wait_bcon; 1761 val = musb->a_wait_bcon;
1761 vbus = musb_platform_get_vbus_status(musb); 1762 vbus = musb_platform_get_vbus_status(musb);
@@ -1769,6 +1770,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1769 vbus = 0; 1770 vbus = 0;
1770 } 1771 }
1771 spin_unlock_irqrestore(&musb->lock, flags); 1772 spin_unlock_irqrestore(&musb->lock, flags);
1773 pm_runtime_put_sync(dev);
1772 1774
1773 return sprintf(buf, "Vbus %s, timeout %lu msec\n", 1775 return sprintf(buf, "Vbus %s, timeout %lu msec\n",
1774 vbus ? "on" : "off", val); 1776 vbus ? "on" : "off", val);
@@ -2471,11 +2473,11 @@ static int musb_remove(struct platform_device *pdev)
2471 musb_disable_interrupts(musb); 2473 musb_disable_interrupts(musb);
2472 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 2474 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2473 spin_unlock_irqrestore(&musb->lock, flags); 2475 spin_unlock_irqrestore(&musb->lock, flags);
2476 musb_platform_exit(musb);
2474 2477
2475 pm_runtime_dont_use_autosuspend(musb->controller); 2478 pm_runtime_dont_use_autosuspend(musb->controller);
2476 pm_runtime_put_sync(musb->controller); 2479 pm_runtime_put_sync(musb->controller);
2477 pm_runtime_disable(musb->controller); 2480 pm_runtime_disable(musb->controller);
2478 musb_platform_exit(musb);
2479 musb_phy_callback = NULL; 2481 musb_phy_callback = NULL;
2480 if (musb->dma_controller) 2482 if (musb->dma_controller)
2481 musb_dma_controller_destroy(musb->dma_controller); 2483 musb_dma_controller_destroy(musb->dma_controller);
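
vbus_show() gains a pm_runtime_get_sync()/pm_runtime_put_sync() pair because reading DEVCTL while the controller is runtime-suspended can return junk or wedge the bus, and the sysfs path has no other guarantee that the hardware is powered. A sketch of the bracketing pattern, with read_reg standing in for the driver's real accessor:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>
    #include <linux/types.h>

    /* Hedged sketch: take a runtime-PM reference so the device is
     * resumed before the register read, and drop it afterwards so
     * the device may autosuspend again. */
    static u8 example_read_reg(struct device *dev,
                               u8 (*read_reg)(struct device *))
    {
        u8 val;

        pm_runtime_get_sync(dev);   /* resume if suspended */
        val = read_reg(dev);        /* hypothetical accessor */
        pm_runtime_put_sync(dev);   /* release the reference */
        return val;
    }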
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 3b1b9695177a..6034c39b67d1 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1076,7 +1076,7 @@ static int uas_post_reset(struct usb_interface *intf)
1076 return 0; 1076 return 0;
1077 1077
1078 err = uas_configure_endpoints(devinfo); 1078 err = uas_configure_endpoints(devinfo);
1079 if (err && err != ENODEV) 1079 if (err && err != -ENODEV)
1080 shost_printk(KERN_ERR, shost, 1080 shost_printk(KERN_ERR, shost,
1081 "%s: alloc streams error %d after reset", 1081 "%s: alloc streams error %d after reset",
1082 __func__, err); 1082 __func__, err);
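
The uas one-liner fixes a classic errno sign bug: kernel calls return negative errno values, so comparing against the positive ENODEV constant never matched and the "alloc streams error" message was printed even for the expected -ENODEV case. A two-assert userspace illustration:

    #include <assert.h>
    #include <errno.h>

    int main(void)
    {
        int err = -ENODEV;          /* what the kernel call returns */

        /* Old check "err != ENODEV": -19 != 19 is always true, so
         * the error was logged even for the expected case. */
        assert(err != ENODEV);

        /* Fixed check "err != -ENODEV": false for -ENODEV, so the
         * message is now correctly suppressed. */
        assert(!(err != -ENODEV));
        return 0;
    }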
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 264af199aec8..747d3a9596d9 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2118,6 +2118,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
2118 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2118 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2119 US_FL_BROKEN_FUA ), 2119 US_FL_BROKEN_FUA ),
2120 2120
2121/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */
2122UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117,
2123 "JMicron",
2124 "USB to ATA/ATAPI Bridge",
2125 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2126 US_FL_BROKEN_FUA ),
2127
2121/* Reported-by George Cherian <george.cherian@cavium.com> */ 2128/* Reported-by George Cherian <george.cherian@cavium.com> */
2122UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, 2129UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
2123 "JMicron", 2130 "JMicron",
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
index 9ce4756adad6..dcd8ef085b30 100644
--- a/drivers/usb/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/fusb302/fusb302.c
@@ -1857,7 +1857,8 @@ static int fusb302_probe(struct i2c_client *client,
1857 chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev); 1857 chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);
1858 if (IS_ERR(chip->tcpm_port)) { 1858 if (IS_ERR(chip->tcpm_port)) {
1859 ret = PTR_ERR(chip->tcpm_port); 1859 ret = PTR_ERR(chip->tcpm_port);
1860 dev_err(dev, "cannot register tcpm port, ret=%d", ret); 1860 if (ret != -EPROBE_DEFER)
1861 dev_err(dev, "cannot register tcpm port, ret=%d", ret);
1861 goto destroy_workqueue; 1862 goto destroy_workqueue;
1862 } 1863 }
1863 1864
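
Suppressing the message for -EPROBE_DEFER follows the usual convention: deferral is not a failure, since the driver core will re-run probe once the missing dependency appears, so logging it as an error only floods dmesg on every deferral. A sketch of the pattern (example_check_probe is a made-up name):

    #include <linux/device.h>
    #include <linux/err.h>

    /* Hedged sketch: only log genuine failures; stay quiet on
     * -EPROBE_DEFER because probe simply runs again later. */
    static int example_check_probe(struct device *dev, void *handle)
    {
        if (IS_ERR(handle)) {
            int ret = PTR_ERR(handle);

            if (ret != -EPROBE_DEFER)
                dev_err(dev, "registration failed: %d\n", ret);
            return ret;
        }
        return 0;
    }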
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index f4d563ee7690..8b637a4b474b 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -252,9 +252,6 @@ struct tcpm_port {
252 unsigned int nr_src_pdo; 252 unsigned int nr_src_pdo;
253 u32 snk_pdo[PDO_MAX_OBJECTS]; 253 u32 snk_pdo[PDO_MAX_OBJECTS];
254 unsigned int nr_snk_pdo; 254 unsigned int nr_snk_pdo;
255 unsigned int nr_fixed; /* number of fixed sink PDOs */
256 unsigned int nr_var; /* number of variable sink PDOs */
257 unsigned int nr_batt; /* number of battery sink PDOs */
258 u32 snk_vdo[VDO_MAX_OBJECTS]; 255 u32 snk_vdo[VDO_MAX_OBJECTS];
259 unsigned int nr_snk_vdo; 256 unsigned int nr_snk_vdo;
260 257
@@ -1770,90 +1767,39 @@ static int tcpm_pd_check_request(struct tcpm_port *port)
1770 return 0; 1767 return 0;
1771} 1768}
1772 1769
1773#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y)) 1770static int tcpm_pd_select_pdo(struct tcpm_port *port)
1774#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
1775
1776static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
1777 int *src_pdo)
1778{ 1771{
1779 unsigned int i, j, max_mw = 0, max_mv = 0, mw = 0, mv = 0, ma = 0; 1772 unsigned int i, max_mw = 0, max_mv = 0;
1780 int ret = -EINVAL; 1773 int ret = -EINVAL;
1781 1774
1782 /* 1775 /*
1783 * Select the source PDO providing the most power which has a 1776 * Select the source PDO providing the most power while staying within
1784 * matching sink cap. 1777 * the board's voltage limits. Prefer PDO providing exp
1785 */ 1778 */
1786 for (i = 0; i < port->nr_source_caps; i++) { 1779 for (i = 0; i < port->nr_source_caps; i++) {
1787 u32 pdo = port->source_caps[i]; 1780 u32 pdo = port->source_caps[i];
1788 enum pd_pdo_type type = pdo_type(pdo); 1781 enum pd_pdo_type type = pdo_type(pdo);
1782 unsigned int mv, ma, mw;
1789 1783
1790 if (type == PDO_TYPE_FIXED) { 1784 if (type == PDO_TYPE_FIXED)
1791 for (j = 0; j < port->nr_fixed; j++) { 1785 mv = pdo_fixed_voltage(pdo);
1792 if (pdo_fixed_voltage(pdo) == 1786 else
1793 pdo_fixed_voltage(port->snk_pdo[j])) { 1787 mv = pdo_min_voltage(pdo);
1794 ma = min_current(pdo, port->snk_pdo[j]); 1788
1795 mv = pdo_fixed_voltage(pdo); 1789 if (type == PDO_TYPE_BATT) {
1796 mw = ma * mv / 1000; 1790 mw = pdo_max_power(pdo);
1797 if (mw > max_mw || 1791 } else {
1798 (mw == max_mw && mv > max_mv)) { 1792 ma = min(pdo_max_current(pdo),
1799 ret = 0; 1793 port->max_snk_ma);
1800 *src_pdo = i; 1794 mw = ma * mv / 1000;
1801 *sink_pdo = j; 1795 }
1802 max_mw = mw; 1796
1803 max_mv = mv; 1797 /* Prefer higher voltages if available */
1804 } 1798 if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
1805 /* There could only be one fixed pdo 1799 mv <= port->max_snk_mv) {
1806 * at a specific voltage level. 1800 ret = i;
1807 * So breaking here. 1801 max_mw = mw;
1808 */ 1802 max_mv = mv;
1809 break;
1810 }
1811 }
1812 } else if (type == PDO_TYPE_BATT) {
1813 for (j = port->nr_fixed;
1814 j < port->nr_fixed +
1815 port->nr_batt;
1816 j++) {
1817 if (pdo_min_voltage(pdo) >=
1818 pdo_min_voltage(port->snk_pdo[j]) &&
1819 pdo_max_voltage(pdo) <=
1820 pdo_max_voltage(port->snk_pdo[j])) {
1821 mw = min_power(pdo, port->snk_pdo[j]);
1822 mv = pdo_min_voltage(pdo);
1823 if (mw > max_mw ||
1824 (mw == max_mw && mv > max_mv)) {
1825 ret = 0;
1826 *src_pdo = i;
1827 *sink_pdo = j;
1828 max_mw = mw;
1829 max_mv = mv;
1830 }
1831 }
1832 }
1833 } else if (type == PDO_TYPE_VAR) {
1834 for (j = port->nr_fixed +
1835 port->nr_batt;
1836 j < port->nr_fixed +
1837 port->nr_batt +
1838 port->nr_var;
1839 j++) {
1840 if (pdo_min_voltage(pdo) >=
1841 pdo_min_voltage(port->snk_pdo[j]) &&
1842 pdo_max_voltage(pdo) <=
1843 pdo_max_voltage(port->snk_pdo[j])) {
1844 ma = min_current(pdo, port->snk_pdo[j]);
1845 mv = pdo_min_voltage(pdo);
1846 mw = ma * mv / 1000;
1847 if (mw > max_mw ||
1848 (mw == max_mw && mv > max_mv)) {
1849 ret = 0;
1850 *src_pdo = i;
1851 *sink_pdo = j;
1852 max_mw = mw;
1853 max_mv = mv;
1854 }
1855 }
1856 }
1857 } 1803 }
1858 } 1804 }
1859 1805
@@ -1865,14 +1811,13 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1865 unsigned int mv, ma, mw, flags; 1811 unsigned int mv, ma, mw, flags;
1866 unsigned int max_ma, max_mw; 1812 unsigned int max_ma, max_mw;
1867 enum pd_pdo_type type; 1813 enum pd_pdo_type type;
1868 int src_pdo_index, snk_pdo_index; 1814 int index;
1869 u32 pdo, matching_snk_pdo; 1815 u32 pdo;
1870 1816
1871 if (tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index) < 0) 1817 index = tcpm_pd_select_pdo(port);
1818 if (index < 0)
1872 return -EINVAL; 1819 return -EINVAL;
1873 1820 pdo = port->source_caps[index];
1874 pdo = port->source_caps[src_pdo_index];
1875 matching_snk_pdo = port->snk_pdo[snk_pdo_index];
1876 type = pdo_type(pdo); 1821 type = pdo_type(pdo);
1877 1822
1878 if (type == PDO_TYPE_FIXED) 1823 if (type == PDO_TYPE_FIXED)
@@ -1880,28 +1825,26 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1880 else 1825 else
1881 mv = pdo_min_voltage(pdo); 1826 mv = pdo_min_voltage(pdo);
1882 1827
1883 /* Select maximum available current within the sink pdo's limit */ 1828 /* Select maximum available current within the board's power limit */
1884 if (type == PDO_TYPE_BATT) { 1829 if (type == PDO_TYPE_BATT) {
1885 mw = min_power(pdo, matching_snk_pdo); 1830 mw = pdo_max_power(pdo);
1886 ma = 1000 * mw / mv; 1831 ma = 1000 * min(mw, port->max_snk_mw) / mv;
1887 } else { 1832 } else {
1888 ma = min_current(pdo, matching_snk_pdo); 1833 ma = min(pdo_max_current(pdo),
1889 mw = ma * mv / 1000; 1834 1000 * port->max_snk_mw / mv);
1890 } 1835 }
1836 ma = min(ma, port->max_snk_ma);
1891 1837
1892 flags = RDO_USB_COMM | RDO_NO_SUSPEND; 1838 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
1893 1839
1894 /* Set mismatch bit if offered power is less than operating power */ 1840 /* Set mismatch bit if offered power is less than operating power */
1841 mw = ma * mv / 1000;
1895 max_ma = ma; 1842 max_ma = ma;
1896 max_mw = mw; 1843 max_mw = mw;
1897 if (mw < port->operating_snk_mw) { 1844 if (mw < port->operating_snk_mw) {
1898 flags |= RDO_CAP_MISMATCH; 1845 flags |= RDO_CAP_MISMATCH;
1899 if (type == PDO_TYPE_BATT && 1846 max_mw = port->operating_snk_mw;
1900 (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo))) 1847 max_ma = max_mw * 1000 / mv;
1901 max_mw = pdo_max_power(matching_snk_pdo);
1902 else if (pdo_max_current(matching_snk_pdo) >
1903 pdo_max_current(pdo))
1904 max_ma = pdo_max_current(matching_snk_pdo);
1905 } 1848 }
1906 1849
1907 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d", 1850 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
@@ -1910,16 +1853,16 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1910 port->polarity); 1853 port->polarity);
1911 1854
1912 if (type == PDO_TYPE_BATT) { 1855 if (type == PDO_TYPE_BATT) {
1913 *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags); 1856 *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
1914 1857
1915 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s", 1858 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
1916 src_pdo_index, mv, mw, 1859 index, mv, mw,
1917 flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); 1860 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1918 } else { 1861 } else {
1919 *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags); 1862 *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
1920 1863
1921 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s", 1864 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
1922 src_pdo_index, mv, ma, 1865 index, mv, ma,
1923 flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); 1866 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1924 } 1867 }
1925 1868
@@ -3650,19 +3593,6 @@ int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
3650} 3593}
3651EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities); 3594EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
3652 3595
3653static int nr_type_pdos(const u32 *pdo, unsigned int nr_pdo,
3654 enum pd_pdo_type type)
3655{
3656 int count = 0;
3657 int i;
3658
3659 for (i = 0; i < nr_pdo; i++) {
3660 if (pdo_type(pdo[i]) == type)
3661 count++;
3662 }
3663 return count;
3664}
3665
3666struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) 3596struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3667{ 3597{
3668 struct tcpm_port *port; 3598 struct tcpm_port *port;
@@ -3708,15 +3638,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3708 tcpc->config->nr_src_pdo); 3638 tcpc->config->nr_src_pdo);
3709 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, 3639 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3710 tcpc->config->nr_snk_pdo); 3640 tcpc->config->nr_snk_pdo);
3711 port->nr_fixed = nr_type_pdos(port->snk_pdo,
3712 port->nr_snk_pdo,
3713 PDO_TYPE_FIXED);
3714 port->nr_var = nr_type_pdos(port->snk_pdo,
3715 port->nr_snk_pdo,
3716 PDO_TYPE_VAR);
3717 port->nr_batt = nr_type_pdos(port->snk_pdo,
3718 port->nr_snk_pdo,
3719 PDO_TYPE_BATT);
3720 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo, 3641 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3721 tcpc->config->nr_snk_vdo); 3642 tcpc->config->nr_snk_vdo);
3722 3643
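
With the per-type sink-PDO matching reverted, tcpm_pd_select_pdo() is again a single pass over the source capabilities: compute each PDO's deliverable power, clamped by the board-level max_snk_ma/max_snk_mv limits, and keep the highest-power candidate, breaking ties toward higher voltage. A worked userspace example of the arithmetic with two hypothetical fixed PDOs (5 V/3 A and 9 V/2 A, board limit 3 A / 9 V):

    #include <stdio.h>

    int main(void)
    {
        unsigned int mv[2] = { 5000, 9000 };    /* hypothetical PDOs */
        unsigned int ma[2] = { 3000, 2000 };
        unsigned int max_snk_ma = 3000, max_snk_mv = 9000;
        unsigned int i, mw, best = 0, max_mw = 0, max_mv = 0;

        for (i = 0; i < 2; i++) {
            unsigned int cur_ma = ma[i] < max_snk_ma ? ma[i] : max_snk_ma;

            if (mv[i] > max_snk_mv)
                continue;           /* over the board's voltage limit */
            mw = cur_ma * mv[i] / 1000;
            if (mw > max_mw || (mw == max_mw && mv[i] > max_mv)) {
                best = i;
                max_mw = mw;
                max_mv = mv[i];
            }
        }
        /* 5 V/3 A gives 15 W, 9 V/2 A gives 18 W, so the 9 V PDO wins:
         * prints "selected PDO 1: 9000 mV, 18000 mW". */
        printf("selected PDO %u: %u mV, %u mW\n", best, max_mv, max_mw);
        return 0;
    }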
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index d86f72bbbb91..6dcd3ff655c3 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -105,10 +105,14 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
105 if (rv != 0) 105 if (rv != 0)
106 return -EINVAL; 106 return -EINVAL;
107 107
108 if (!udc) {
109 dev_err(dev, "no device");
110 return -ENODEV;
111 }
108 spin_lock_irqsave(&udc->lock, flags); 112 spin_lock_irqsave(&udc->lock, flags);
109 /* Don't export what we don't have */ 113 /* Don't export what we don't have */
110 if (!udc || !udc->driver || !udc->pullup) { 114 if (!udc->driver || !udc->pullup) {
111 dev_err(dev, "no device or gadget not bound"); 115 dev_err(dev, "gadget not bound");
112 ret = -ENODEV; 116 ret = -ENODEV;
113 goto unlock; 117 goto unlock;
114 } 118 }
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index b5fb56b822fd..a31d9b240af8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -170,7 +170,7 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
170 if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { 170 if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
171 ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, 171 ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
172 vhost_net_buf_get_size(rxq), 172 vhost_net_buf_get_size(rxq),
173 __skb_array_destroy_skb); 173 tun_ptr_free);
174 rxq->head = rxq->tail = 0; 174 rxq->head = rxq->tail = 0;
175 } 175 }
176} 176}
@@ -948,6 +948,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
948 n->vqs[i].done_idx = 0; 948 n->vqs[i].done_idx = 0;
949 n->vqs[i].vhost_hlen = 0; 949 n->vqs[i].vhost_hlen = 0;
950 n->vqs[i].sock_hlen = 0; 950 n->vqs[i].sock_hlen = 0;
951 n->vqs[i].rx_ring = NULL;
951 vhost_net_buf_init(&n->vqs[i].rxq); 952 vhost_net_buf_init(&n->vqs[i].rxq);
952 } 953 }
953 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); 954 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
@@ -972,6 +973,7 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
972 vhost_net_disable_vq(n, vq); 973 vhost_net_disable_vq(n, vq);
973 vq->private_data = NULL; 974 vq->private_data = NULL;
974 vhost_net_buf_unproduce(nvq); 975 vhost_net_buf_unproduce(nvq);
976 nvq->rx_ring = NULL;
975 mutex_unlock(&vq->mutex); 977 mutex_unlock(&vq->mutex);
976 return sock; 978 return sock;
977} 979}
@@ -1160,14 +1162,14 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
1160 vhost_net_disable_vq(n, vq); 1162 vhost_net_disable_vq(n, vq);
1161 vq->private_data = sock; 1163 vq->private_data = sock;
1162 vhost_net_buf_unproduce(nvq); 1164 vhost_net_buf_unproduce(nvq);
1163 if (index == VHOST_NET_VQ_RX)
1164 nvq->rx_ring = get_tap_ptr_ring(fd);
1165 r = vhost_vq_init_access(vq); 1165 r = vhost_vq_init_access(vq);
1166 if (r) 1166 if (r)
1167 goto err_used; 1167 goto err_used;
1168 r = vhost_net_enable_vq(n, vq); 1168 r = vhost_net_enable_vq(n, vq);
1169 if (r) 1169 if (r)
1170 goto err_used; 1170 goto err_used;
1171 if (index == VHOST_NET_VQ_RX)
1172 nvq->rx_ring = get_tap_ptr_ring(fd);
1171 1173
1172 oldubufs = nvq->ubufs; 1174 oldubufs = nvq->ubufs;
1173 nvq->ubufs = ubufs; 1175 nvq->ubufs = ubufs;
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index af6fc97f4ba4..a436d44f1b7f 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
122 unsigned char __user *ured; 122 unsigned char __user *ured;
123 unsigned char __user *ugreen; 123 unsigned char __user *ugreen;
124 unsigned char __user *ublue; 124 unsigned char __user *ublue;
125 int index, count, i; 125 unsigned int index, count, i;
126 126
127 if (get_user(index, &c->index) || 127 if (get_user(index, &c->index) ||
128 __get_user(count, &c->count) || 128 __get_user(count, &c->count) ||
@@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
161 unsigned char __user *ugreen; 161 unsigned char __user *ugreen;
162 unsigned char __user *ublue; 162 unsigned char __user *ublue;
163 struct fb_cmap *cmap = &info->cmap; 163 struct fb_cmap *cmap = &info->cmap;
164 int index, count, i; 164 unsigned int index, count, i;
165 u8 red, green, blue; 165 u8 red, green, blue;
166 166
167 if (get_user(index, &c->index) || 167 if (get_user(index, &c->index) ||
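
Changing index and count from int to unsigned int matters here because both come straight from userspace via get_user(). As signed values, a negative index can sneak past an "index + count > len" range check and then be used to address the colormap arrays out of bounds; as unsigned values the same bit pattern is huge and the check rejects it. A compact illustration:

    #include <stdio.h>

    int main(void)
    {
        int index = -8, count = 4;              /* as if from get_user() */
        unsigned int uindex = (unsigned int)index;
        unsigned int len = 256;

        /* Signed: -8 + 4 = -4, which is <= 256, so the range check
         * passes and cmap->red[index] would write before the array.
         * Prints 1. */
        printf("signed check passes:   %d\n", index + count <= (int)len);

        /* Unsigned: -8 wraps to 0xfffffff8, the sum exceeds len, and
         * the ioctl now fails with -EINVAL instead. Prints 0. */
        printf("unsigned check passes: %d\n", uindex + count <= len);
        return 0;
    }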
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index eb30f3e09a47..71458f493cf8 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -428,8 +428,6 @@ unmap_release:
428 i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next); 428 i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
429 } 429 }
430 430
431 vq->vq.num_free += total_sg;
432
433 if (indirect) 431 if (indirect)
434 kfree(desc); 432 kfree(desc);
435 433
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index e0678c14480f..3a33c5344bd5 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf,
566 char c; 566 char c;
567 if (get_user(c, buf + i)) 567 if (get_user(c, buf + i))
568 return -EFAULT; 568 return -EFAULT;
569 expect_close = (c == 'V'); 569 if (c == 'V')
570 expect_close = true;
570 } 571 }
571 572
572 /* Properly order writes across fork()ed processes */ 573 /* Properly order writes across fork()ed processes */
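
The f71808e_wdt change fixes the magic-close handling: "expect_close = (c == 'V')" was re-evaluated for every byte, so writing "V\n" (as most userspace does) armed the flag on 'V' and immediately cleared it on the newline, and the watchdog fired after close anyway. Latching the flag once 'V' is seen anywhere in the buffer restores the intended behaviour. A small demonstration of the two evaluation orders:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *buf = "V\n";    /* 'V' followed by a newline */
        size_t i, len = strlen(buf);
        bool old_way = false, new_way = false;

        for (i = 0; i < len; i++) {
            old_way = (buf[i] == 'V');  /* overwritten by the '\n' */
            if (buf[i] == 'V')
                new_way = true;         /* sticky once set */
        }
        printf("old: %d, new: %d\n", old_way, new_way); /* old: 0, new: 1 */
        return 0;
    }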
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index f1f00dfc0e68..b0a158073abd 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -28,16 +28,7 @@
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30#include <linux/watchdog.h> 30#include <linux/watchdog.h>
31#ifdef CONFIG_HPWDT_NMI_DECODING
32#include <linux/dmi.h>
33#include <linux/spinlock.h>
34#include <linux/nmi.h>
35#include <linux/kdebug.h>
36#include <linux/notifier.h>
37#include <asm/set_memory.h>
38#endif /* CONFIG_HPWDT_NMI_DECODING */
39#include <asm/nmi.h> 31#include <asm/nmi.h>
40#include <asm/frame.h>
41 32
42#define HPWDT_VERSION "1.4.0" 33#define HPWDT_VERSION "1.4.0"
43#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) 34#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
@@ -48,6 +39,9 @@
48static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */ 39static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
49static unsigned int reload; /* the computed soft_margin */ 40static unsigned int reload; /* the computed soft_margin */
50static bool nowayout = WATCHDOG_NOWAYOUT; 41static bool nowayout = WATCHDOG_NOWAYOUT;
42#ifdef CONFIG_HPWDT_NMI_DECODING
43static unsigned int allow_kdump = 1;
44#endif
51static char expect_release; 45static char expect_release;
52static unsigned long hpwdt_is_open; 46static unsigned long hpwdt_is_open;
53 47
@@ -63,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = {
63}; 57};
64MODULE_DEVICE_TABLE(pci, hpwdt_devices); 58MODULE_DEVICE_TABLE(pci, hpwdt_devices);
65 59
66#ifdef CONFIG_HPWDT_NMI_DECODING
67#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
68#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
69#define PCI_BIOS32_PARAGRAPH_LEN 16
70#define PCI_ROM_BASE1 0x000F0000
71#define ROM_SIZE 0x10000
72
73struct bios32_service_dir {
74 u32 signature;
75 u32 entry_point;
76 u8 revision;
77 u8 length;
78 u8 checksum;
79 u8 reserved[5];
80};
81
82/* type 212 */
83struct smbios_cru64_info {
84 u8 type;
85 u8 byte_length;
86 u16 handle;
87 u32 signature;
88 u64 physical_address;
89 u32 double_length;
90 u32 double_offset;
91};
92#define SMBIOS_CRU64_INFORMATION 212
93
94/* type 219 */
95struct smbios_proliant_info {
96 u8 type;
97 u8 byte_length;
98 u16 handle;
99 u32 power_features;
100 u32 omega_features;
101 u32 reserved;
102 u32 misc_features;
103};
104#define SMBIOS_ICRU_INFORMATION 219
105
106
107struct cmn_registers {
108 union {
109 struct {
110 u8 ral;
111 u8 rah;
112 u16 rea2;
113 };
114 u32 reax;
115 } u1;
116 union {
117 struct {
118 u8 rbl;
119 u8 rbh;
120 u8 reb2l;
121 u8 reb2h;
122 };
123 u32 rebx;
124 } u2;
125 union {
126 struct {
127 u8 rcl;
128 u8 rch;
129 u16 rec2;
130 };
131 u32 recx;
132 } u3;
133 union {
134 struct {
135 u8 rdl;
136 u8 rdh;
137 u16 red2;
138 };
139 u32 redx;
140 } u4;
141
142 u32 resi;
143 u32 redi;
144 u16 rds;
145 u16 res;
146 u32 reflags;
147} __attribute__((packed));
148
149static unsigned int hpwdt_nmi_decoding;
150static unsigned int allow_kdump = 1;
151static unsigned int is_icru;
152static unsigned int is_uefi;
153static DEFINE_SPINLOCK(rom_lock);
154static void *cru_rom_addr;
155static struct cmn_registers cmn_regs;
156
157extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
158 unsigned long *pRomEntry);
159
160#ifdef CONFIG_X86_32
161/* --32 Bit Bios------------------------------------------------------------ */
162
163#define HPWDT_ARCH 32
164
165asm(".text \n\t"
166 ".align 4 \n\t"
167 ".globl asminline_call \n"
168 "asminline_call: \n\t"
169 "pushl %ebp \n\t"
170 "movl %esp, %ebp \n\t"
171 "pusha \n\t"
172 "pushf \n\t"
173 "push %es \n\t"
174 "push %ds \n\t"
175 "pop %es \n\t"
176 "movl 8(%ebp),%eax \n\t"
177 "movl 4(%eax),%ebx \n\t"
178 "movl 8(%eax),%ecx \n\t"
179 "movl 12(%eax),%edx \n\t"
180 "movl 16(%eax),%esi \n\t"
181 "movl 20(%eax),%edi \n\t"
182 "movl (%eax),%eax \n\t"
183 "push %cs \n\t"
184 "call *12(%ebp) \n\t"
185 "pushf \n\t"
186 "pushl %eax \n\t"
187 "movl 8(%ebp),%eax \n\t"
188 "movl %ebx,4(%eax) \n\t"
189 "movl %ecx,8(%eax) \n\t"
190 "movl %edx,12(%eax) \n\t"
191 "movl %esi,16(%eax) \n\t"
192 "movl %edi,20(%eax) \n\t"
193 "movw %ds,24(%eax) \n\t"
194 "movw %es,26(%eax) \n\t"
195 "popl %ebx \n\t"
196 "movl %ebx,(%eax) \n\t"
197 "popl %ebx \n\t"
198 "movl %ebx,28(%eax) \n\t"
199 "pop %es \n\t"
200 "popf \n\t"
201 "popa \n\t"
202 "leave \n\t"
203 "ret \n\t"
204 ".previous");
205
206
207/*
208 * cru_detect
209 *
210 * Routine Description:
211 * This function uses the 32-bit BIOS Service Directory record to
212 * search for a $CRU record.
213 *
214 * Return Value:
215 * 0 : SUCCESS
216 * <0 : FAILURE
217 */
218static int cru_detect(unsigned long map_entry,
219 unsigned long map_offset)
220{
221 void *bios32_map;
222 unsigned long *bios32_entrypoint;
223 unsigned long cru_physical_address;
224 unsigned long cru_length;
225 unsigned long physical_bios_base = 0;
226 unsigned long physical_bios_offset = 0;
227 int retval = -ENODEV;
228
229 bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));
230
231 if (bios32_map == NULL)
232 return -ENODEV;
233
234 bios32_entrypoint = bios32_map + map_offset;
235
236 cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
237
238 set_memory_x((unsigned long)bios32_map, 2);
239 asminline_call(&cmn_regs, bios32_entrypoint);
240
241 if (cmn_regs.u1.ral != 0) {
242 pr_warn("Call succeeded but with an error: 0x%x\n",
243 cmn_regs.u1.ral);
244 } else {
245 physical_bios_base = cmn_regs.u2.rebx;
246 physical_bios_offset = cmn_regs.u4.redx;
247 cru_length = cmn_regs.u3.recx;
248 cru_physical_address =
249 physical_bios_base + physical_bios_offset;
250
251 /* If the values look OK, then map it in. */
252 if ((physical_bios_base + physical_bios_offset)) {
253 cru_rom_addr =
254 ioremap(cru_physical_address, cru_length);
255 if (cru_rom_addr) {
256 set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
257 (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
258 retval = 0;
259 }
260 }
261
262 pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base);
263 pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset);
264 pr_debug("CRU Length: 0x%lx\n", cru_length);
265 pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr);
266 }
267 iounmap(bios32_map);
268 return retval;
269}
270
271/*
272 * bios_checksum
273 */
274static int bios_checksum(const char __iomem *ptr, int len)
275{
276 char sum = 0;
277 int i;
278
279 /*
280 * calculate checksum of size bytes. This should add up
281 * to zero if we have a valid header.
282 */
283 for (i = 0; i < len; i++)
284 sum += ptr[i];
285
286 return ((sum == 0) && (len > 0));
287}
288
289/*
290 * bios32_present
291 *
292 * Routine Description:
293 * This function finds the 32-bit BIOS Service Directory
294 *
295 * Return Value:
296 * 0 : SUCCESS
297 * <0 : FAILURE
298 */
299static int bios32_present(const char __iomem *p)
300{
301 struct bios32_service_dir *bios_32_ptr;
302 int length;
303 unsigned long map_entry, map_offset;
304
305 bios_32_ptr = (struct bios32_service_dir *) p;
306
307 /*
308 * Search for signature by checking equal to the swizzled value
309 * instead of calling another routine to perform a strcmp.
310 */
311 if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
312 length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
313 if (bios_checksum(p, length)) {
314 /*
315 * According to the spec, we're looking for the
316 * first 4KB-aligned address below the entrypoint
317 * listed in the header. The Service Directory code
318 * is guaranteed to occupy no more than 2 4KB pages.
319 */
320 map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
321 map_offset = bios_32_ptr->entry_point - map_entry;
322
323 return cru_detect(map_entry, map_offset);
324 }
325 }
326 return -ENODEV;
327}
328
329static int detect_cru_service(void)
330{
331 char __iomem *p, *q;
332 int rc = -1;
333
334 /*
335 * Search from 0x0f0000 through 0x0fffff, inclusive.
336 */
337 p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
338 if (p == NULL)
339 return -ENOMEM;
340
341 for (q = p; q < p + ROM_SIZE; q += 16) {
342 rc = bios32_present(q);
343 if (!rc)
344 break;
345 }
346 iounmap(p);
347 return rc;
348}
349/* ------------------------------------------------------------------------- */
350#endif /* CONFIG_X86_32 */
351#ifdef CONFIG_X86_64
352/* --64 Bit Bios------------------------------------------------------------ */
353
354#define HPWDT_ARCH 64
355
356asm(".text \n\t"
357 ".align 4 \n\t"
358 ".globl asminline_call \n\t"
359 ".type asminline_call, @function \n\t"
360 "asminline_call: \n\t"
361 FRAME_BEGIN
362 "pushq %rax \n\t"
363 "pushq %rbx \n\t"
364 "pushq %rdx \n\t"
365 "pushq %r12 \n\t"
366 "pushq %r9 \n\t"
367 "movq %rsi, %r12 \n\t"
368 "movq %rdi, %r9 \n\t"
369 "movl 4(%r9),%ebx \n\t"
370 "movl 8(%r9),%ecx \n\t"
371 "movl 12(%r9),%edx \n\t"
372 "movl 16(%r9),%esi \n\t"
373 "movl 20(%r9),%edi \n\t"
374 "movl (%r9),%eax \n\t"
375 "call *%r12 \n\t"
376 "pushfq \n\t"
377 "popq %r12 \n\t"
378 "movl %eax, (%r9) \n\t"
379 "movl %ebx, 4(%r9) \n\t"
380 "movl %ecx, 8(%r9) \n\t"
381 "movl %edx, 12(%r9) \n\t"
382 "movl %esi, 16(%r9) \n\t"
383 "movl %edi, 20(%r9) \n\t"
384 "movq %r12, %rax \n\t"
385 "movl %eax, 28(%r9) \n\t"
386 "popq %r9 \n\t"
387 "popq %r12 \n\t"
388 "popq %rdx \n\t"
389 "popq %rbx \n\t"
390 "popq %rax \n\t"
391 FRAME_END
392 "ret \n\t"
393 ".previous");
394
395/*
396 * dmi_find_cru
397 *
398 * Routine Description:
399 * This function checks whether or not a SMBIOS/DMI record is
400 * the 64bit CRU info or not
401 */
402static void dmi_find_cru(const struct dmi_header *dm, void *dummy)
403{
404 struct smbios_cru64_info *smbios_cru64_ptr;
405 unsigned long cru_physical_address;
406
407 if (dm->type == SMBIOS_CRU64_INFORMATION) {
408 smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
409 if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
410 cru_physical_address =
411 smbios_cru64_ptr->physical_address +
412 smbios_cru64_ptr->double_offset;
413 cru_rom_addr = ioremap(cru_physical_address,
414 smbios_cru64_ptr->double_length);
415 set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
416 smbios_cru64_ptr->double_length >> PAGE_SHIFT);
417 }
418 }
419}
420
421static int detect_cru_service(void)
422{
423 cru_rom_addr = NULL;
424
425 dmi_walk(dmi_find_cru, NULL);
426
427 /* if cru_rom_addr has been set then we found a CRU service */
428 return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
429}
430/* ------------------------------------------------------------------------- */
431#endif /* CONFIG_X86_64 */
432#endif /* CONFIG_HPWDT_NMI_DECODING */
433 60
434/* 61/*
435 * Watchdog operations 62 * Watchdog operations
@@ -486,30 +113,12 @@ static int hpwdt_my_nmi(void)
486 */ 113 */
487static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) 114static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
488{ 115{
489 unsigned long rom_pl;
490 static int die_nmi_called;
491
492 if (!hpwdt_nmi_decoding)
493 return NMI_DONE;
494
495 if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi()) 116 if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
496 return NMI_DONE; 117 return NMI_DONE;
497 118
498 spin_lock_irqsave(&rom_lock, rom_pl);
499 if (!die_nmi_called && !is_icru && !is_uefi)
500 asminline_call(&cmn_regs, cru_rom_addr);
501 die_nmi_called = 1;
502 spin_unlock_irqrestore(&rom_lock, rom_pl);
503
504 if (allow_kdump) 119 if (allow_kdump)
505 hpwdt_stop(); 120 hpwdt_stop();
506 121
507 if (!is_icru && !is_uefi) {
508 if (cmn_regs.u1.ral == 0) {
509 nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
510 return NMI_HANDLED;
511 }
512 }
513 nmi_panic(regs, "An NMI occurred. Depending on your system the reason " 122 nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
514 "for the NMI is logged in any one of the following " 123 "for the NMI is logged in any one of the following "
515 "resources:\n" 124 "resources:\n"
@@ -675,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = {
675 * Init & Exit 284 * Init & Exit
676 */ 285 */
677 286
678#ifdef CONFIG_HPWDT_NMI_DECODING
679#ifdef CONFIG_X86_LOCAL_APIC
680static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
681{
682 /*
683 * If nmi_watchdog is turned off then we can turn on
684 * our nmi decoding capability.
685 */
686 hpwdt_nmi_decoding = 1;
687}
688#else
689static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
690{
691 dev_warn(&dev->dev, "NMI decoding is disabled. "
692 "Your kernel does not support a NMI Watchdog.\n");
693}
694#endif /* CONFIG_X86_LOCAL_APIC */
695
696/*
697 * dmi_find_icru
698 *
699 * Routine Description:
700 * This function checks whether or not we are on an iCRU-based server.
701 * This check is independent of architecture and needs to be made for
702 * any ProLiant system.
703 */
704static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
705{
706 struct smbios_proliant_info *smbios_proliant_ptr;
707
708 if (dm->type == SMBIOS_ICRU_INFORMATION) {
709 smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
710 if (smbios_proliant_ptr->misc_features & 0x01)
711 is_icru = 1;
712 if (smbios_proliant_ptr->misc_features & 0x1400)
713 is_uefi = 1;
714 }
715}
716 287
717static int hpwdt_init_nmi_decoding(struct pci_dev *dev) 288static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
718{ 289{
290#ifdef CONFIG_HPWDT_NMI_DECODING
719 int retval; 291 int retval;
720
721 /*
722 * On typical CRU-based systems we need to map that service in
723 * the BIOS. For 32 bit Operating Systems we need to go through
724 * the 32 Bit BIOS Service Directory. For 64 bit Operating
725 * Systems we get that service through SMBIOS.
726 *
727 * On systems that support the new iCRU service all we need to
728 * do is call dmi_walk to get the supported flag value and skip
729 * the old cru detect code.
730 */
731 dmi_walk(dmi_find_icru, NULL);
732 if (!is_icru && !is_uefi) {
733
734 /*
735 * We need to map the ROM to get the CRU service.
736 * For 32 bit Operating Systems we need to go through the 32 Bit
737 * BIOS Service Directory
738 * For 64 bit Operating Systems we get that service through SMBIOS.
739 */
740 retval = detect_cru_service();
741 if (retval < 0) {
742 dev_warn(&dev->dev,
743 "Unable to detect the %d Bit CRU Service.\n",
744 HPWDT_ARCH);
745 return retval;
746 }
747
748 /*
749 * We know this is the only CRU call we need to make so lets keep as
750 * few instructions as possible once the NMI comes in.
751 */
752 cmn_regs.u1.rah = 0x0D;
753 cmn_regs.u1.ral = 0x02;
754 }
755
756 /* 292 /*
757 * Only one function can register for NMI_UNKNOWN 293 * Only one function can register for NMI_UNKNOWN
758 */ 294 */
@@ -780,45 +316,26 @@ error:
780 dev_warn(&dev->dev, 316 dev_warn(&dev->dev,
781 "Unable to register a die notifier (err=%d).\n", 317 "Unable to register a die notifier (err=%d).\n",
782 retval); 318 retval);
783 if (cru_rom_addr)
784 iounmap(cru_rom_addr);
785 return retval; 319 return retval;
320#endif /* CONFIG_HPWDT_NMI_DECODING */
321 return 0;
786} 322}
787 323
788static void hpwdt_exit_nmi_decoding(void) 324static void hpwdt_exit_nmi_decoding(void)
789{ 325{
326#ifdef CONFIG_HPWDT_NMI_DECODING
790 unregister_nmi_handler(NMI_UNKNOWN, "hpwdt"); 327 unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
791 unregister_nmi_handler(NMI_SERR, "hpwdt"); 328 unregister_nmi_handler(NMI_SERR, "hpwdt");
792 unregister_nmi_handler(NMI_IO_CHECK, "hpwdt"); 329 unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
793 if (cru_rom_addr) 330#endif
794 iounmap(cru_rom_addr);
795}
796#else /* !CONFIG_HPWDT_NMI_DECODING */
797static void hpwdt_check_nmi_decoding(struct pci_dev *dev)
798{
799}
800
801static int hpwdt_init_nmi_decoding(struct pci_dev *dev)
802{
803 return 0;
804} 331}
805 332
806static void hpwdt_exit_nmi_decoding(void)
807{
808}
809#endif /* CONFIG_HPWDT_NMI_DECODING */
810
811static int hpwdt_init_one(struct pci_dev *dev, 333static int hpwdt_init_one(struct pci_dev *dev,
812 const struct pci_device_id *ent) 334 const struct pci_device_id *ent)
813{ 335{
814 int retval; 336 int retval;
815 337
816 /* 338 /*
817 * Check if we can do NMI decoding or not
818 */
819 hpwdt_check_nmi_decoding(dev);
820
821 /*
822 * First let's find out if we are on an iLO2+ server. We will 339 * First let's find out if we are on an iLO2+ server. We will
823 * not run on a legacy ASM box. 340 * not run on a legacy ASM box.
824 * So we only support the G5 ProLiant servers and higher. 341 * So we only support the G5 ProLiant servers and higher.
@@ -922,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
922#ifdef CONFIG_HPWDT_NMI_DECODING 439#ifdef CONFIG_HPWDT_NMI_DECODING
923module_param(allow_kdump, int, 0); 440module_param(allow_kdump, int, 0);
924MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); 441MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");
925#endif /* !CONFIG_HPWDT_NMI_DECODING */ 442#endif /* CONFIG_HPWDT_NMI_DECODING */
926 443
927module_pci_driver(hpwdt_driver); 444module_pci_driver(hpwdt_driver);
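The hpwdt hunks replace parallel #ifdef/#else stub definitions with single function bodies whose optional part compiles away. A minimal sketch of the shape, with a hypothetical CONFIG_FOO_DECODING and helper:

	/*
	 * Sketch of the consolidation applied above: one definition whose
	 * optional part is compiled out, instead of a second stub
	 * definition under #else.  CONFIG_FOO_DECODING and
	 * foo_register_handlers() are hypothetical.
	 */
	static int foo_init_decoding(struct pci_dev *dev)
	{
	#ifdef CONFIG_FOO_DECODING
		int retval = foo_register_handlers(dev);	/* hypothetical */

		if (retval)
			return retval;
	#endif /* CONFIG_FOO_DECODING */
		return 0;
	}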
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index 316c2eb122d2..e8bd9887c566 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -50,6 +50,7 @@
50 */ 50 */
51 51
52#include <linux/io.h> 52#include <linux/io.h>
53#include <linux/io-64-nonatomic-lo-hi.h>
53#include <linux/interrupt.h> 54#include <linux/interrupt.h>
54#include <linux/module.h> 55#include <linux/module.h>
55#include <linux/moduleparam.h> 56#include <linux/moduleparam.h>
@@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
159 !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0)) 160 !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
160 timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR); 161 timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
161 162
162 timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) - 163 timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
163 arch_counter_get_cntvct(); 164 arch_counter_get_cntvct();
164 165
165 do_div(timeleft, gwdt->clk); 166 do_div(timeleft, gwdt->clk);
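readq() does not exist on 32-bit platforms, so the driver now pulls in io-64-nonatomic-lo-hi.h and uses lo_hi_readq(), which builds the 64-bit value from two 32-bit reads, low word first. A sketch of what such a helper does (the real one lives in the header just added; note the two halves are not read atomically, so the value can tear if the register ticks between reads):

	/* Illustrative sketch, not the kernel header itself. */
	static inline u64 sketch_lo_hi_readq(const volatile void __iomem *addr)
	{
		const volatile u32 __iomem *p = addr;
		u32 low, high;

		low = readl(p);		/* low 32 bits first */
		high = readl(p + 1);	/* then the high 32 bits */

		return low + ((u64)high << 32);
	}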
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 6d1fbda0f461..0da9943d405f 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
392 392
393 memset(&r, 0, sizeof(r)); 393 memset(&r, 0, sizeof(r));
394 r.start = gas->address; 394 r.start = gas->address;
395 r.end = r.start + gas->access_width; 395 r.end = r.start + gas->access_width - 1;
396 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 396 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
397 r.flags = IORESOURCE_MEM; 397 r.flags = IORESOURCE_MEM;
398 } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 398 } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
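The wdat_wdt change is the classic inclusive-bounds off-by-one: struct resource ranges include both endpoints and resource_size() is defined as end - start + 1, so a region of access_width bytes must end at start + access_width - 1. A sketch (variable names illustrative):

	static void sketch_fill_resource(struct resource *r, u64 base, u64 width)
	{
		memset(r, 0, sizeof(*r));
		r->start = base;		/* first byte of the region */
		r->end   = base + width - 1;	/* last byte, inclusive     */
		r->flags = IORESOURCE_MEM;
		/* now resource_size(r) == width */
	}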
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 74888cacd0b0..ec9eb4fba59c 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus,
466 466
467 /* Register with generic device framework. */ 467 /* Register with generic device framework. */
468 err = device_register(&xendev->dev); 468 err = device_register(&xendev->dev);
469 if (err) 469 if (err) {
470 put_device(&xendev->dev);
471 xendev = NULL;
470 goto fail; 472 goto fail;
473 }
471 474
472 return 0; 475 return 0;
473fail: 476fail:
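The xenbus fix follows the device_register() rule: after a failed register, the caller must drop its reference with put_device() so the release() callback frees the object; it must never kfree() it directly. The pattern, with a hypothetical wrapper device:

	struct foo_device {			/* hypothetical wrapper */
		struct device dev;
	};

	static int sketch_register(struct foo_device *fdev)
	{
		int err = device_register(&fdev->dev);

		if (err) {
			put_device(&fdev->dev);	/* frees fdev via dev.release() */
			return err;		/* fdev must not be touched now */
		}
		return 0;
	}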
diff --git a/fs/aio.c b/fs/aio.c
index a062d75109cb..6bcd3fb5265a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -68,9 +68,9 @@ struct aio_ring {
68#define AIO_RING_PAGES 8 68#define AIO_RING_PAGES 8
69 69
70struct kioctx_table { 70struct kioctx_table {
71 struct rcu_head rcu; 71 struct rcu_head rcu;
72 unsigned nr; 72 unsigned nr;
73 struct kioctx *table[]; 73 struct kioctx __rcu *table[];
74}; 74};
75 75
76struct kioctx_cpu { 76struct kioctx_cpu {
@@ -115,7 +115,8 @@ struct kioctx {
115 struct page **ring_pages; 115 struct page **ring_pages;
116 long nr_pages; 116 long nr_pages;
117 117
118 struct work_struct free_work; 118 struct rcu_head free_rcu;
119 struct work_struct free_work; /* see free_ioctx() */
119 120
120 /* 121 /*
121 * signals when all in-flight requests are done 122 * signals when all in-flight requests are done
@@ -329,7 +330,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
329 for (i = 0; i < table->nr; i++) { 330 for (i = 0; i < table->nr; i++) {
330 struct kioctx *ctx; 331 struct kioctx *ctx;
331 332
332 ctx = table->table[i]; 333 ctx = rcu_dereference(table->table[i]);
333 if (ctx && ctx->aio_ring_file == file) { 334 if (ctx && ctx->aio_ring_file == file) {
334 if (!atomic_read(&ctx->dead)) { 335 if (!atomic_read(&ctx->dead)) {
335 ctx->user_id = ctx->mmap_base = vma->vm_start; 336 ctx->user_id = ctx->mmap_base = vma->vm_start;
@@ -588,6 +589,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
588 return cancel(&kiocb->common); 589 return cancel(&kiocb->common);
589} 590}
590 591
592/*
593 * free_ioctx() should be RCU delayed to synchronize against the RCU
594 * protected lookup_ioctx() and also needs process context to call
 595 * aio_free_ring(), hence the double bounce through kioctx->free_rcu and
596 * ->free_work.
597 */
591static void free_ioctx(struct work_struct *work) 598static void free_ioctx(struct work_struct *work)
592{ 599{
593 struct kioctx *ctx = container_of(work, struct kioctx, free_work); 600 struct kioctx *ctx = container_of(work, struct kioctx, free_work);
@@ -601,6 +608,14 @@ static void free_ioctx(struct work_struct *work)
601 kmem_cache_free(kioctx_cachep, ctx); 608 kmem_cache_free(kioctx_cachep, ctx);
602} 609}
603 610
611static void free_ioctx_rcufn(struct rcu_head *head)
612{
613 struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
614
615 INIT_WORK(&ctx->free_work, free_ioctx);
616 schedule_work(&ctx->free_work);
617}
618
604static void free_ioctx_reqs(struct percpu_ref *ref) 619static void free_ioctx_reqs(struct percpu_ref *ref)
605{ 620{
606 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); 621 struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
@@ -609,8 +624,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
609 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) 624 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
610 complete(&ctx->rq_wait->comp); 625 complete(&ctx->rq_wait->comp);
611 626
612 INIT_WORK(&ctx->free_work, free_ioctx); 627 /* Synchronize against RCU protected table->table[] dereferences */
613 schedule_work(&ctx->free_work); 628 call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
614} 629}
615 630
616/* 631/*
@@ -651,9 +666,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
651 while (1) { 666 while (1) {
652 if (table) 667 if (table)
653 for (i = 0; i < table->nr; i++) 668 for (i = 0; i < table->nr; i++)
654 if (!table->table[i]) { 669 if (!rcu_access_pointer(table->table[i])) {
655 ctx->id = i; 670 ctx->id = i;
656 table->table[i] = ctx; 671 rcu_assign_pointer(table->table[i], ctx);
657 spin_unlock(&mm->ioctx_lock); 672 spin_unlock(&mm->ioctx_lock);
658 673
659 /* While kioctx setup is in progress, 674 /* While kioctx setup is in progress,
@@ -834,11 +849,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
834 } 849 }
835 850
836 table = rcu_dereference_raw(mm->ioctx_table); 851 table = rcu_dereference_raw(mm->ioctx_table);
837 WARN_ON(ctx != table->table[ctx->id]); 852 WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
838 table->table[ctx->id] = NULL; 853 RCU_INIT_POINTER(table->table[ctx->id], NULL);
839 spin_unlock(&mm->ioctx_lock); 854 spin_unlock(&mm->ioctx_lock);
840 855
841 /* percpu_ref_kill() will do the necessary call_rcu() */ 856 /* free_ioctx_reqs() will do the necessary RCU synchronization */
842 wake_up_all(&ctx->wait); 857 wake_up_all(&ctx->wait);
843 858
844 /* 859 /*
@@ -880,7 +895,8 @@ void exit_aio(struct mm_struct *mm)
880 895
881 skipped = 0; 896 skipped = 0;
882 for (i = 0; i < table->nr; ++i) { 897 for (i = 0; i < table->nr; ++i) {
883 struct kioctx *ctx = table->table[i]; 898 struct kioctx *ctx =
899 rcu_dereference_protected(table->table[i], true);
884 900
885 if (!ctx) { 901 if (!ctx) {
886 skipped++; 902 skipped++;
@@ -1069,7 +1085,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1069 if (!table || id >= table->nr) 1085 if (!table || id >= table->nr)
1070 goto out; 1086 goto out;
1071 1087
1072 ctx = table->table[id]; 1088 ctx = rcu_dereference(table->table[id]);
1073 if (ctx && ctx->user_id == ctx_id) { 1089 if (ctx && ctx->user_id == ctx_id) {
1074 percpu_ref_get(&ctx->users); 1090 percpu_ref_get(&ctx->users);
1075 ret = ctx; 1091 ret = ctx;
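Two generic patterns are at work in the fs/aio.c hunks: the table slots become __rcu pointers that readers may only touch through the RCU accessors, and teardown bounces through call_rcu() into a workqueue because the RCU callback runs in softirq context while aio_free_ring() needs process context. A minimal sketch of both, with hypothetical names:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct obj {
		struct rcu_head free_rcu;
		struct work_struct free_work;
	};

	static struct obj __rcu *slot;		/* hypothetical RCU slot */
	static DEFINE_SPINLOCK(slot_lock);

	static void obj_free_workfn(struct work_struct *work)
	{
		/* Process context: safe to do sleeping teardown here. */
		kfree(container_of(work, struct obj, free_work));
	}

	/* call_rcu() callbacks run in softirq context, so only queue work. */
	static void obj_free_rcufn(struct rcu_head *head)
	{
		struct obj *obj = container_of(head, struct obj, free_rcu);

		INIT_WORK(&obj->free_work, obj_free_workfn);
		schedule_work(&obj->free_work);
	}

	static void obj_retract(void)
	{
		struct obj *obj;

		spin_lock(&slot_lock);
		obj = rcu_dereference_protected(slot,
						lockdep_is_held(&slot_lock));
		RCU_INIT_POINTER(slot, NULL);	/* retract the pointer */
		spin_unlock(&slot_lock);

		if (obj)	/* free only after a full grace period */
			call_rcu(&obj->free_rcu, obj_free_rcufn);
	}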
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f94b2d8c744a..26484648d090 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1519,6 +1519,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
1519 if (!node) 1519 if (!node)
1520 break; 1520 break;
1521 bytenr = node->val; 1521 bytenr = node->val;
1522 shared.share_count = 0;
1522 cond_resched(); 1523 cond_resched();
1523 } 1524 }
1524 1525
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index dec0907dfb8a..fcfc20de2df3 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1370,6 +1370,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1370 stripe_start = stripe->physical; 1370 stripe_start = stripe->physical;
1371 if (physical >= stripe_start && 1371 if (physical >= stripe_start &&
1372 physical < stripe_start + rbio->stripe_len && 1372 physical < stripe_start + rbio->stripe_len &&
1373 stripe->dev->bdev &&
1373 bio->bi_disk == stripe->dev->bdev->bd_disk && 1374 bio->bi_disk == stripe->dev->bdev->bd_disk &&
1374 bio->bi_partno == stripe->dev->bdev->bd_partno) { 1375 bio->bi_partno == stripe->dev->bdev->bd_partno) {
1375 return i; 1376 return i;
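The raid56 fix inserts a NULL guard ahead of the bd_disk/bd_partno dereferences; since && evaluates left to right and short-circuits, the guard has to be the first operand. Condensed into a helper for illustration:

	static bool sketch_bio_matches(struct bio *bio, struct btrfs_device *dev)
	{
		return dev->bdev &&				/* guard first */
		       bio->bi_disk == dev->bdev->bd_disk &&	/* then deref  */
		       bio->bi_partno == dev->bdev->bd_partno;
	}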
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index d11c70bff5a9..a8bafed931f4 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -423,7 +423,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
423{ 423{
424 struct btrfs_fs_info *fs_info = to_fs_info(kobj); 424 struct btrfs_fs_info *fs_info = to_fs_info(kobj);
425 425
426 return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->nodesize); 426 return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
427} 427}
428 428
429BTRFS_ATTR(, nodesize, btrfs_nodesize_show); 429BTRFS_ATTR(, nodesize, btrfs_nodesize_show);
@@ -433,7 +433,8 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
433{ 433{
434 struct btrfs_fs_info *fs_info = to_fs_info(kobj); 434 struct btrfs_fs_info *fs_info = to_fs_info(kobj);
435 435
436 return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize); 436 return snprintf(buf, PAGE_SIZE, "%u\n",
437 fs_info->super_copy->sectorsize);
437} 438}
438 439
439BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show); 440BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);
@@ -443,7 +444,8 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
443{ 444{
444 struct btrfs_fs_info *fs_info = to_fs_info(kobj); 445 struct btrfs_fs_info *fs_info = to_fs_info(kobj);
445 446
446 return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize); 447 return snprintf(buf, PAGE_SIZE, "%u\n",
448 fs_info->super_copy->sectorsize);
447} 449}
448 450
449BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show); 451BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 9220f004001c..04f07144b45c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1722,23 +1722,19 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
1722 1722
1723 super = fs_info->super_copy; 1723 super = fs_info->super_copy;
1724 1724
1725 /* update latest btrfs_super_block::chunk_root refs */
1726 root_item = &fs_info->chunk_root->root_item; 1725 root_item = &fs_info->chunk_root->root_item;
1727 btrfs_set_super_chunk_root(super, root_item->bytenr); 1726 super->chunk_root = root_item->bytenr;
1728 btrfs_set_super_chunk_root_generation(super, root_item->generation); 1727 super->chunk_root_generation = root_item->generation;
1729 btrfs_set_super_chunk_root_level(super, root_item->level); 1728 super->chunk_root_level = root_item->level;
1730 1729
1731 /* update latest btrfs_super_block::root refs */
1732 root_item = &fs_info->tree_root->root_item; 1730 root_item = &fs_info->tree_root->root_item;
1733 btrfs_set_super_root(super, root_item->bytenr); 1731 super->root = root_item->bytenr;
1734 btrfs_set_super_generation(super, root_item->generation); 1732 super->generation = root_item->generation;
1735 btrfs_set_super_root_level(super, root_item->level); 1733 super->root_level = root_item->level;
1736
1737 if (btrfs_test_opt(fs_info, SPACE_CACHE)) 1734 if (btrfs_test_opt(fs_info, SPACE_CACHE))
1738 btrfs_set_super_cache_generation(super, root_item->generation); 1735 super->cache_generation = root_item->generation;
1739 if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags)) 1736 if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1740 btrfs_set_super_uuid_tree_generation(super, 1737 super->uuid_tree_generation = root_item->generation;
1741 root_item->generation);
1742} 1738}
1743 1739
1744int btrfs_transaction_in_commit(struct btrfs_fs_info *info) 1740int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
diff --git a/fs/dcache.c b/fs/dcache.c
index 7c38f39958bc..8945e6cabd93 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -647,11 +647,16 @@ again:
647 spin_unlock(&parent->d_lock); 647 spin_unlock(&parent->d_lock);
648 goto again; 648 goto again;
649 } 649 }
650 rcu_read_unlock(); 650 if (parent != dentry) {
651 if (parent != dentry)
652 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 651 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
653 else 652 if (unlikely(dentry->d_lockref.count < 0)) {
653 spin_unlock(&parent->d_lock);
654 parent = NULL;
655 }
656 } else {
654 parent = NULL; 657 parent = NULL;
658 }
659 rcu_read_unlock();
655 return parent; 660 return parent;
656} 661}
657 662
@@ -2474,7 +2479,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
2474 2479
2475retry: 2480retry:
2476 rcu_read_lock(); 2481 rcu_read_lock();
2477 seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1; 2482 seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2478 r_seq = read_seqbegin(&rename_lock); 2483 r_seq = read_seqbegin(&rename_lock);
2479 dentry = __d_lookup_rcu(parent, name, &d_seq); 2484 dentry = __d_lookup_rcu(parent, name, &d_seq);
2480 if (unlikely(dentry)) { 2485 if (unlikely(dentry)) {
@@ -2495,8 +2500,14 @@ retry:
2495 rcu_read_unlock(); 2500 rcu_read_unlock();
2496 goto retry; 2501 goto retry;
2497 } 2502 }
2503
2504 if (unlikely(seq & 1)) {
2505 rcu_read_unlock();
2506 goto retry;
2507 }
2508
2498 hlist_bl_lock(b); 2509 hlist_bl_lock(b);
2499 if (unlikely(parent->d_inode->i_dir_seq != seq)) { 2510 if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2500 hlist_bl_unlock(b); 2511 hlist_bl_unlock(b);
2501 rcu_read_unlock(); 2512 rcu_read_unlock();
2502 goto retry; 2513 goto retry;
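The d_alloc_parallel() hunks switch to loading i_dir_seq in full, retrying when the low bit shows a writer mid-update, and re-checking with READ_ONCE() before committing. That is the usual sequence-count reader shape; a sketch with hypothetical names:

	static void *sketch_do_lookup(void);	/* hypothetical, lock-free */

	static void *sketch_lookup_stable(const unsigned int *dir_seq)
	{
		unsigned int seq;
		void *res;

	retry:
		seq = smp_load_acquire(dir_seq); /* pairs with writer's release */
		if (seq & 1) {			 /* odd: writer mid-update      */
			cpu_relax();
			goto retry;
		}
		res = sketch_do_lookup();
		if (READ_ONCE(*dir_seq) != seq)	 /* raced with a writer: redo   */
			goto retry;
		return res;
	}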
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 86d6a4435c87..51f940e76c5e 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -807,9 +807,6 @@ do_alloc:
807 iomap->length = hole_size(inode, lblock, &mp); 807 iomap->length = hole_size(inode, lblock, &mp);
808 else 808 else
809 iomap->length = size - pos; 809 iomap->length = size - pos;
810 } else {
811 if (height <= ip->i_height)
812 iomap->length = hole_size(inode, lblock, &mp);
813 } 810 }
814 goto out_release; 811 goto out_release;
815} 812}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8fe1b0aa2896..b9a254dcc0e7 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -108,6 +108,16 @@ static void huge_pagevec_release(struct pagevec *pvec)
108 pagevec_reinit(pvec); 108 pagevec_reinit(pvec);
109} 109}
110 110
111/*
112 * Mask used when checking the page offset value passed in via system
113 * calls. This value will be converted to a loff_t which is signed.
114 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
115 * value. The extra bit (- 1 in the shift value) is to take the sign
116 * bit into account.
117 */
118#define PGOFF_LOFFT_MAX \
119 (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
120
111static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) 121static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
112{ 122{
113 struct inode *inode = file_inode(file); 123 struct inode *inode = file_inode(file);
@@ -127,12 +137,13 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
127 vma->vm_ops = &hugetlb_vm_ops; 137 vma->vm_ops = &hugetlb_vm_ops;
128 138
129 /* 139 /*
130 * Offset passed to mmap (before page shift) could have been 140 * page based offset in vm_pgoff could be sufficiently large to
131 * negative when represented as a (l)off_t. 141 * overflow a (l)off_t when converted to byte offset.
132 */ 142 */
133 if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0) 143 if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
134 return -EINVAL; 144 return -EINVAL;
135 145
146 /* must be huge page aligned */
136 if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) 147 if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
137 return -EINVAL; 148 return -EINVAL;
138 149
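The new mask deserves a worked instance. For the common 64-bit, 4 KiB page case it keeps the top PAGE_SHIFT + 1 = 13 bits of vm_pgoff; if any of them is set, shifting the page offset left by PAGE_SHIFT would spill past the 63 value bits of a signed loff_t. A sketch under those assumptions:

	/*
	 * Worked instance for PAGE_SHIFT == 12, BITS_PER_LONG == 64:
	 * keep the top 13 bits (bits 51..63) of vm_pgoff.  If any is set,
	 * vm_pgoff << 12 no longer fits in a signed loff_t.
	 */
	#define SKETCH_PGOFF_MAX \
		(((1UL << (12 + 1)) - 1) << (64 - (12 + 1)))
	/* == 0xfff8000000000000 */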
diff --git a/fs/namei.c b/fs/namei.c
index 921ae32dbc80..cafa365eeb70 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -559,9 +559,10 @@ static int __nd_alloc_stack(struct nameidata *nd)
559static bool path_connected(const struct path *path) 559static bool path_connected(const struct path *path)
560{ 560{
561 struct vfsmount *mnt = path->mnt; 561 struct vfsmount *mnt = path->mnt;
562 struct super_block *sb = mnt->mnt_sb;
562 563
563 /* Only bind mounts can have disconnected paths */ 564 /* Bind mounts and multi-root filesystems can have disconnected paths */
564 if (mnt->mnt_root == mnt->mnt_sb->s_root) 565 if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root))
565 return true; 566 return true;
566 567
567 return is_subdir(path->dentry, mnt->mnt_root); 568 return is_subdir(path->dentry, mnt->mnt_root);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8c10b0562e75..621c517b325c 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -86,10 +86,10 @@ struct nfs_direct_req {
86 struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX]; 86 struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
87 int mirror_count; 87 int mirror_count;
88 88
89 loff_t io_start; /* Start offset for I/O */
89 ssize_t count, /* bytes actually processed */ 90 ssize_t count, /* bytes actually processed */
90 max_count, /* max expected count */ 91 max_count, /* max expected count */
91 bytes_left, /* bytes left to be sent */ 92 bytes_left, /* bytes left to be sent */
92 io_start, /* start of IO */
93 error; /* any reported error */ 93 error; /* any reported error */
94 struct completion completion; /* wait for i/o completion */ 94 struct completion completion; /* wait for i/o completion */
95 95
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index c13e826614b5..ee723aa153a3 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -292,8 +292,11 @@ pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
292void 292void
293pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) 293pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
294{ 294{
295 struct inode *inode = lo->plh_inode; 295 struct inode *inode;
296 296
297 if (!lo)
298 return;
299 inode = lo->plh_inode;
297 pnfs_layoutreturn_before_put_layout_hdr(lo); 300 pnfs_layoutreturn_before_put_layout_hdr(lo);
298 301
299 if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) { 302 if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
@@ -1241,10 +1244,12 @@ retry:
1241 spin_lock(&ino->i_lock); 1244 spin_lock(&ino->i_lock);
1242 lo = nfsi->layout; 1245 lo = nfsi->layout;
1243 if (!lo || !pnfs_layout_is_valid(lo) || 1246 if (!lo || !pnfs_layout_is_valid(lo) ||
1244 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) 1247 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
1248 lo = NULL;
1245 goto out_noroc; 1249 goto out_noroc;
1250 }
1251 pnfs_get_layout_hdr(lo);
1246 if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) { 1252 if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
1247 pnfs_get_layout_hdr(lo);
1248 spin_unlock(&ino->i_lock); 1253 spin_unlock(&ino->i_lock);
1249 wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, 1254 wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
1250 TASK_UNINTERRUPTIBLE); 1255 TASK_UNINTERRUPTIBLE);
@@ -1312,10 +1317,12 @@ out_noroc:
1312 struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld; 1317 struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
1313 if (ld->prepare_layoutreturn) 1318 if (ld->prepare_layoutreturn)
1314 ld->prepare_layoutreturn(args); 1319 ld->prepare_layoutreturn(args);
1320 pnfs_put_layout_hdr(lo);
1315 return true; 1321 return true;
1316 } 1322 }
1317 if (layoutreturn) 1323 if (layoutreturn)
1318 pnfs_send_layoutreturn(lo, &stateid, iomode, true); 1324 pnfs_send_layoutreturn(lo, &stateid, iomode, true);
1325 pnfs_put_layout_hdr(lo);
1319 return false; 1326 return false;
1320} 1327}
1321 1328
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 29bacdc56f6a..5e470e233c83 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2631,6 +2631,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
2631 /* initial superblock/root creation */ 2631 /* initial superblock/root creation */
2632 mount_info->fill_super(s, mount_info); 2632 mount_info->fill_super(s, mount_info);
2633 nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned); 2633 nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
2634 if (!(server->flags & NFS_MOUNT_UNSHARED))
2635 s->s_iflags |= SB_I_MULTIROOT;
2634 } 2636 }
2635 2637
2636 mntroot = nfs_get_root(s, mount_info->mntfh, dev_name); 2638 mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
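This pairs with the fs/namei.c hunk above: path_connected()'s old shortcut assumed only bind mounts can yield disconnected paths, but NFS may share one superblock among several roots when mounts are not NFS_MOUNT_UNSHARED, so such superblocks are now flagged SB_I_MULTIROOT and excluded from the shortcut. Putting the two hunks together, the check reads roughly:

	static bool sketch_path_connected(const struct path *path)
	{
		struct vfsmount *mnt = path->mnt;
		struct super_block *sb = mnt->mnt_sb;

		/* Shortcut only when the superblock has a single root. */
		if (!(sb->s_iflags & SB_I_MULTIROOT) &&
		    mnt->mnt_root == sb->s_root)
			return true;

		/* Bind mounts and multiroot filesystems: walk and verify. */
		return is_subdir(path->dentry, mnt->mnt_root);
	}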
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7428a669d7a7..e7d8ceae8f26 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1876,40 +1876,43 @@ int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1876 return status; 1876 return status;
1877} 1877}
1878 1878
1879int nfs_commit_inode(struct inode *inode, int how) 1879static int __nfs_commit_inode(struct inode *inode, int how,
1880 struct writeback_control *wbc)
1880{ 1881{
1881 LIST_HEAD(head); 1882 LIST_HEAD(head);
1882 struct nfs_commit_info cinfo; 1883 struct nfs_commit_info cinfo;
1883 int may_wait = how & FLUSH_SYNC; 1884 int may_wait = how & FLUSH_SYNC;
1884 int error = 0; 1885 int ret, nscan;
1885 int res;
1886 1886
1887 nfs_init_cinfo_from_inode(&cinfo, inode); 1887 nfs_init_cinfo_from_inode(&cinfo, inode);
1888 nfs_commit_begin(cinfo.mds); 1888 nfs_commit_begin(cinfo.mds);
1889 res = nfs_scan_commit(inode, &head, &cinfo); 1889 for (;;) {
1890 if (res) 1890 ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
1891 error = nfs_generic_commit_list(inode, &head, how, &cinfo); 1891 if (ret <= 0)
1892 break;
1893 ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
1894 if (ret < 0)
1895 break;
1896 ret = 0;
1897 if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
1898 if (nscan < wbc->nr_to_write)
1899 wbc->nr_to_write -= nscan;
1900 else
1901 wbc->nr_to_write = 0;
1902 }
1903 if (nscan < INT_MAX)
1904 break;
1905 cond_resched();
1906 }
1892 nfs_commit_end(cinfo.mds); 1907 nfs_commit_end(cinfo.mds);
1893 if (res == 0) 1908 if (ret || !may_wait)
1894 return res; 1909 return ret;
1895 if (error < 0) 1910 return wait_on_commit(cinfo.mds);
1896 goto out_error; 1911}
1897 if (!may_wait) 1912
1898 goto out_mark_dirty; 1913int nfs_commit_inode(struct inode *inode, int how)
1899 error = wait_on_commit(cinfo.mds); 1914{
1900 if (error < 0) 1915 return __nfs_commit_inode(inode, how, NULL);
1901 return error;
1902 return res;
1903out_error:
1904 res = error;
1905 /* Note: If we exit without ensuring that the commit is complete,
1906 * we must mark the inode as dirty. Otherwise, future calls to
1907 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1908 * that the data is on the disk.
1909 */
1910out_mark_dirty:
1911 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1912 return res;
1913} 1916}
1914EXPORT_SYMBOL_GPL(nfs_commit_inode); 1917EXPORT_SYMBOL_GPL(nfs_commit_inode);
1915 1918
@@ -1919,11 +1922,11 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1919 int flags = FLUSH_SYNC; 1922 int flags = FLUSH_SYNC;
1920 int ret = 0; 1923 int ret = 0;
1921 1924
1922 /* no commits means nothing needs to be done */
1923 if (!atomic_long_read(&nfsi->commit_info.ncommit))
1924 return ret;
1925
1926 if (wbc->sync_mode == WB_SYNC_NONE) { 1925 if (wbc->sync_mode == WB_SYNC_NONE) {
1926 /* no commits means nothing needs to be done */
1927 if (!atomic_long_read(&nfsi->commit_info.ncommit))
1928 goto check_requests_outstanding;
1929
1927 /* Don't commit yet if this is a non-blocking flush and there 1930 /* Don't commit yet if this is a non-blocking flush and there
1928 * are a lot of outstanding writes for this mapping. 1931 * are a lot of outstanding writes for this mapping.
1929 */ 1932 */
@@ -1934,16 +1937,16 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
1934 flags = 0; 1937 flags = 0;
1935 } 1938 }
1936 1939
1937 ret = nfs_commit_inode(inode, flags); 1940 ret = __nfs_commit_inode(inode, flags, wbc);
1938 if (ret >= 0) { 1941 if (!ret) {
1939 if (wbc->sync_mode == WB_SYNC_NONE) { 1942 if (flags & FLUSH_SYNC)
1940 if (ret < wbc->nr_to_write) 1943 return 0;
1941 wbc->nr_to_write -= ret; 1944 } else if (atomic_long_read(&nfsi->commit_info.ncommit))
1942 else 1945 goto out_mark_dirty;
1943 wbc->nr_to_write = 0; 1946
1944 } 1947check_requests_outstanding:
1945 return 0; 1948 if (!atomic_read(&nfsi->commit_info.rpcs_out))
1946 } 1949 return ret;
1947out_mark_dirty: 1950out_mark_dirty:
1948 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1951 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1949 return ret; 1952 return ret;
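The rewritten __nfs_commit_inode() turns the single scan into a bounded loop that charges completed requests against the writeback budget. Roughly, with hypothetical scan_batch()/commit_batch() helpers standing in for nfs_scan_commit()/nfs_generic_commit_list():

	static int sketch_commit_loop(struct inode *inode, int how,
				      struct writeback_control *wbc)
	{
		LIST_HEAD(head);
		int ret, nscan;

		for (;;) {
			nscan = scan_batch(inode, &head);	/* hypothetical */
			if (nscan <= 0)
				return nscan;
			ret = commit_batch(inode, &head, how);	/* hypothetical */
			if (ret < 0)
				return ret;
			if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
				if (nscan < wbc->nr_to_write)
					wbc->nr_to_write -= nscan;
				else
					wbc->nr_to_write = 0;
			}
			if (nscan < INT_MAX)	/* last pass got everything */
				return 0;
			cond_resched();		/* be nice between batches */
		}
	}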
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 150521c9671b..61b770e39809 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -268,6 +268,35 @@ free_blocked_lock(struct nfsd4_blocked_lock *nbl)
268 kfree(nbl); 268 kfree(nbl);
269} 269}
270 270
271static void
272remove_blocked_locks(struct nfs4_lockowner *lo)
273{
274 struct nfs4_client *clp = lo->lo_owner.so_client;
275 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
276 struct nfsd4_blocked_lock *nbl;
277 LIST_HEAD(reaplist);
278
279 /* Dequeue all blocked locks */
280 spin_lock(&nn->blocked_locks_lock);
281 while (!list_empty(&lo->lo_blocked)) {
282 nbl = list_first_entry(&lo->lo_blocked,
283 struct nfsd4_blocked_lock,
284 nbl_list);
285 list_del_init(&nbl->nbl_list);
286 list_move(&nbl->nbl_lru, &reaplist);
287 }
288 spin_unlock(&nn->blocked_locks_lock);
289
290 /* Now free them */
291 while (!list_empty(&reaplist)) {
292 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
293 nbl_lru);
294 list_del_init(&nbl->nbl_lru);
295 posix_unblock_lock(&nbl->nbl_lock);
296 free_blocked_lock(nbl);
297 }
298}
299
271static int 300static int
272nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) 301nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
273{ 302{
@@ -1866,6 +1895,7 @@ static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1866static void 1895static void
1867__destroy_client(struct nfs4_client *clp) 1896__destroy_client(struct nfs4_client *clp)
1868{ 1897{
1898 int i;
1869 struct nfs4_openowner *oo; 1899 struct nfs4_openowner *oo;
1870 struct nfs4_delegation *dp; 1900 struct nfs4_delegation *dp;
1871 struct list_head reaplist; 1901 struct list_head reaplist;
@@ -1895,6 +1925,16 @@ __destroy_client(struct nfs4_client *clp)
1895 nfs4_get_stateowner(&oo->oo_owner); 1925 nfs4_get_stateowner(&oo->oo_owner);
1896 release_openowner(oo); 1926 release_openowner(oo);
1897 } 1927 }
1928 for (i = 0; i < OWNER_HASH_SIZE; i++) {
1929 struct nfs4_stateowner *so, *tmp;
1930
1931 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
1932 so_strhash) {
1933 /* Should be no openowners at this point */
1934 WARN_ON_ONCE(so->so_is_open_owner);
1935 remove_blocked_locks(lockowner(so));
1936 }
1937 }
1898 nfsd4_return_all_client_layouts(clp); 1938 nfsd4_return_all_client_layouts(clp);
1899 nfsd4_shutdown_callback(clp); 1939 nfsd4_shutdown_callback(clp);
1900 if (clp->cl_cb_conn.cb_xprt) 1940 if (clp->cl_cb_conn.cb_xprt)
@@ -6355,6 +6395,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
6355 } 6395 }
6356 spin_unlock(&clp->cl_lock); 6396 spin_unlock(&clp->cl_lock);
6357 free_ol_stateid_reaplist(&reaplist); 6397 free_ol_stateid_reaplist(&reaplist);
6398 remove_blocked_locks(lo);
6358 nfs4_put_stateowner(&lo->lo_owner); 6399 nfs4_put_stateowner(&lo->lo_owner);
6359 6400
6360 return status; 6401 return status;
@@ -7140,6 +7181,8 @@ nfs4_state_destroy_net(struct net *net)
7140 } 7181 }
7141 } 7182 }
7142 7183
7184 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7185
7143 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 7186 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7144 while (!list_empty(&nn->unconf_id_hashtbl[i])) { 7187 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7145 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 7188 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
@@ -7206,7 +7249,6 @@ nfs4_state_shutdown_net(struct net *net)
7206 struct nfs4_delegation *dp = NULL; 7249 struct nfs4_delegation *dp = NULL;
7207 struct list_head *pos, *next, reaplist; 7250 struct list_head *pos, *next, reaplist;
7208 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7251 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7209 struct nfsd4_blocked_lock *nbl;
7210 7252
7211 cancel_delayed_work_sync(&nn->laundromat_work); 7253 cancel_delayed_work_sync(&nn->laundromat_work);
7212 locks_end_grace(&nn->nfsd4_manager); 7254 locks_end_grace(&nn->nfsd4_manager);
@@ -7227,24 +7269,6 @@ nfs4_state_shutdown_net(struct net *net)
7227 nfs4_put_stid(&dp->dl_stid); 7269 nfs4_put_stid(&dp->dl_stid);
7228 } 7270 }
7229 7271
7230 BUG_ON(!list_empty(&reaplist));
7231 spin_lock(&nn->blocked_locks_lock);
7232 while (!list_empty(&nn->blocked_locks_lru)) {
7233 nbl = list_first_entry(&nn->blocked_locks_lru,
7234 struct nfsd4_blocked_lock, nbl_lru);
7235 list_move(&nbl->nbl_lru, &reaplist);
7236 list_del_init(&nbl->nbl_list);
7237 }
7238 spin_unlock(&nn->blocked_locks_lock);
7239
7240 while (!list_empty(&reaplist)) {
7241 nbl = list_first_entry(&reaplist,
7242 struct nfsd4_blocked_lock, nbl_lru);
7243 list_del_init(&nbl->nbl_lru);
7244 posix_unblock_lock(&nbl->nbl_lock);
7245 free_blocked_lock(nbl);
7246 }
7247
7248 nfsd4_client_tracking_exit(net); 7272 nfsd4_client_tracking_exit(net);
7249 nfs4_state_destroy_net(net); 7273 nfs4_state_destroy_net(net);
7250} 7274}
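remove_blocked_locks() above is the standard two-phase teardown: detach everything onto a private list under the spinlock, then free outside it, since posix_unblock_lock() and the free path should not run under blocked_locks_lock. The real code moves entries via their nbl_lru links because each lock sits on two lists; a stripped-down sketch of the pattern:

	struct item {				/* hypothetical entry type */
		struct list_head node;
	};

	static void free_item(struct item *it);	/* hypothetical */

	static void sketch_reap_all(struct list_head *live, spinlock_t *lock)
	{
		LIST_HEAD(reaplist);
		struct item *it, *tmp;

		spin_lock(lock);
		list_splice_init(live, &reaplist);	/* detach under lock */
		spin_unlock(lock);

		list_for_each_entry_safe(it, tmp, &reaplist, node)
			free_item(it);			/* free outside lock */
	}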
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index 406e72de88f6..ce6ff5a0a6e4 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -24,6 +24,8 @@ config OVERLAY_FS_REDIRECT_DIR
24 an overlay which has redirects on a kernel that doesn't support this 24 an overlay which has redirects on a kernel that doesn't support this
25 feature will have unexpected results. 25 feature will have unexpected results.
26 26
27 If unsure, say N.
28
27config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW 29config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW
28 bool "Overlayfs: follow redirects even if redirects are turned off" 30 bool "Overlayfs: follow redirects even if redirects are turned off"
29 default y 31 default y
@@ -32,8 +34,13 @@ config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW
32 Disable this to get a possibly more secure configuration, but that 34 Disable this to get a possibly more secure configuration, but that
33 might not be backward compatible with previous kernels. 35 might not be backward compatible with previous kernels.
34 36
37 If backward compatibility is not an issue, then it is safe and
38 recommended to say N here.
39
35 For more information, see Documentation/filesystems/overlayfs.txt 40 For more information, see Documentation/filesystems/overlayfs.txt
36 41
42 If unsure, say Y.
43
37config OVERLAY_FS_INDEX 44config OVERLAY_FS_INDEX
38 bool "Overlayfs: turn on inodes index feature by default" 45 bool "Overlayfs: turn on inodes index feature by default"
39 depends on OVERLAY_FS 46 depends on OVERLAY_FS
@@ -51,6 +58,8 @@ config OVERLAY_FS_INDEX
51 That is, mounting an overlay which has an inodes index on a kernel 58 That is, mounting an overlay which has an inodes index on a kernel
52 that doesn't support this feature will have unexpected results. 59 that doesn't support this feature will have unexpected results.
53 60
61 If unsure, say N.
62
54config OVERLAY_FS_NFS_EXPORT 63config OVERLAY_FS_NFS_EXPORT
55 bool "Overlayfs: turn on NFS export feature by default" 64 bool "Overlayfs: turn on NFS export feature by default"
56 depends on OVERLAY_FS 65 depends on OVERLAY_FS
@@ -72,3 +81,8 @@ config OVERLAY_FS_NFS_EXPORT
72 Note, that the NFS export feature is not backward compatible. 81 Note, that the NFS export feature is not backward compatible.
73 That is, mounting an overlay which has a full index on a kernel 82 That is, mounting an overlay which has a full index on a kernel
74 that doesn't support this feature will have unexpected results. 83 that doesn't support this feature will have unexpected results.
84
85 Most users should say N here and enable this feature on a case-by-
86 case basis with the "nfs_export=on" mount option.
87
88 Say N unless you fully understand the consequences.
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index bb94ce9da5c8..87bd4148f4fb 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -19,6 +19,142 @@
19#include <linux/ratelimit.h> 19#include <linux/ratelimit.h>
20#include "overlayfs.h" 20#include "overlayfs.h"
21 21
22static int ovl_encode_maybe_copy_up(struct dentry *dentry)
23{
24 int err;
25
26 if (ovl_dentry_upper(dentry))
27 return 0;
28
29 err = ovl_want_write(dentry);
30 if (!err) {
31 err = ovl_copy_up(dentry);
32 ovl_drop_write(dentry);
33 }
34
35 if (err) {
36 pr_warn_ratelimited("overlayfs: failed to copy up on encode (%pd2, err=%i)\n",
37 dentry, err);
38 }
39
40 return err;
41}
42
43/*
44 * Before encoding a non-upper directory file handle from real layer N, we need
45 * to check if it will be possible to reconnect an overlay dentry from the real
46 * lower decoded dentry. This is done by following the overlay ancestry up to a
47 * "layer N connected" ancestor and verifying that all parents along the way are
48 * "layer N connectable". If an ancestor that is NOT "layer N connectable" is
49 * found, we need to copy up an ancestor, which is "layer N connectable", thus
50 * making that ancestor "layer N connected". For example:
51 *
52 * layer 1: /a
53 * layer 2: /a/b/c
54 *
55 * The overlay dentry /a is NOT "layer 2 connectable", because if dir /a is
56 * copied up and renamed, upper dir /a will be indexed by lower dir /a from
57 * layer 1. The dir /a from layer 2 will never be indexed, so the algorithm (*)
58 * in ovl_lookup_real_ancestor() will not be able to lookup a connected overlay
59 * dentry from the connected lower dentry /a/b/c.
60 *
61 * To avoid this problem on decode time, we need to copy up an ancestor of
62 * /a/b/c, which is "layer 2 connectable", on encode time. That ancestor is
63 * /a/b. After copy up (and index) of /a/b, it will become "layer 2 connected"
64 * and when the time comes to decode the file handle from lower dentry /a/b/c,
65 * ovl_lookup_real_ancestor() will find the indexed ancestor /a/b and decoding
66 * a connected overlay dentry will be accomplished.
67 *
68 * (*) the algorithm in ovl_lookup_real_ancestor() can be improved to lookup an
69 * entry /a in the lower layers above layer N and find the indexed dir /a from
70 * layer 1. If that improvement is made, then the check for "layer N connected"
71 * will need to verify there are no redirects in lower layers above N. In the
72 * example above, /a will be "layer 2 connectable". However, if layer 2 dir /a
73 * is a target of a layer 1 redirect, then /a will NOT be "layer 2 connectable":
74 *
75 * layer 1: /A (redirect = /a)
76 * layer 2: /a/b/c
77 */
78
79/* Return the lowest layer for encoding a connectable file handle */
80static int ovl_connectable_layer(struct dentry *dentry)
81{
82 struct ovl_entry *oe = OVL_E(dentry);
83
84 /* We can get overlay root from root of any layer */
85 if (dentry == dentry->d_sb->s_root)
86 return oe->numlower;
87
88 /*
89 * If it's an unindexed merge dir, then it's not connectable with any
90 * lower layer
91 */
92 if (ovl_dentry_upper(dentry) &&
93 !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
94 return 0;
95
96 /* We can get upper/overlay path from indexed/lower dentry */
97 return oe->lowerstack[0].layer->idx;
98}
99
100/*
101 * @dentry is "connected" if all ancestors up to root or a "connected" ancestor
102 * have the same uppermost lower layer as the origin's layer. We may need to
103 * copy up a "connectable" ancestor to make it "connected". A "connected" dentry
104 * cannot become non "connected", so cache positive result in dentry flags.
105 *
106 * Return the connected origin layer or < 0 on error.
107 */
108static int ovl_connect_layer(struct dentry *dentry)
109{
110 struct dentry *next, *parent = NULL;
111 int origin_layer;
112 int err = 0;
113
114 if (WARN_ON(dentry == dentry->d_sb->s_root) ||
115 WARN_ON(!ovl_dentry_lower(dentry)))
116 return -EIO;
117
118 origin_layer = OVL_E(dentry)->lowerstack[0].layer->idx;
119 if (ovl_dentry_test_flag(OVL_E_CONNECTED, dentry))
120 return origin_layer;
121
122 /* Find the topmost origin layer connectable ancestor of @dentry */
123 next = dget(dentry);
124 for (;;) {
125 parent = dget_parent(next);
126 if (WARN_ON(parent == next)) {
127 err = -EIO;
128 break;
129 }
130
131 /*
132 * If @parent is not origin layer connectable, then copy up
133 * @next which is origin layer connectable and we are done.
134 */
135 if (ovl_connectable_layer(parent) < origin_layer) {
136 err = ovl_encode_maybe_copy_up(next);
137 break;
138 }
139
140 /* If @parent is connected or indexed we are done */
141 if (ovl_dentry_test_flag(OVL_E_CONNECTED, parent) ||
142 ovl_test_flag(OVL_INDEX, d_inode(parent)))
143 break;
144
145 dput(next);
146 next = parent;
147 }
148
149 dput(parent);
150 dput(next);
151
152 if (!err)
153 ovl_dentry_set_flag(OVL_E_CONNECTED, dentry);
154
155 return err ?: origin_layer;
156}
157
22/* 158/*
23 * We only need to encode origin if there is a chance that the same object was 159 * We only need to encode origin if there is a chance that the same object was
24 * encoded pre copy up and then we need to stay consistent with the same 160 * encoded pre copy up and then we need to stay consistent with the same
@@ -41,73 +177,59 @@
41 * L = lower file handle 177 * L = lower file handle
42 * 178 *
43 * (*) Connecting an overlay dir from real lower dentry is not always 179 * (*) Connecting an overlay dir from real lower dentry is not always
44 * possible when there are redirects in lower layers. To mitigate this case, 180 * possible when there are redirects in lower layers and non-indexed merge dirs.
45 * we copy up the lower dir first and then encode an upper dir file handle. 181 * To mitigate those case, we may copy up the lower dir ancestor before encode
182 * a lower dir file handle.
183 *
184 * Return 0 for upper file handle, > 0 for lower file handle or < 0 on error.
46 */ 185 */
47static bool ovl_should_encode_origin(struct dentry *dentry) 186static int ovl_check_encode_origin(struct dentry *dentry)
48{ 187{
49 struct ovl_fs *ofs = dentry->d_sb->s_fs_info; 188 struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
50 189
190 /* Upper file handle for pure upper */
51 if (!ovl_dentry_lower(dentry)) 191 if (!ovl_dentry_lower(dentry))
52 return false; 192 return 0;
53 193
54 /* 194 /*
55 * Decoding a merge dir, whose origin's parent is under a redirected 195 * Upper file handle for non-indexed upper.
56 * lower dir is not always possible. As a simple aproximation, we do
57 * not encode lower dir file handles when overlay has multiple lower
58 * layers and origin is below the topmost lower layer.
59 * 196 *
60 * TODO: copy up only the parent that is under redirected lower. 197 * Root is never indexed, so if there's an upper layer, encode upper for
198 * root.
61 */ 199 */
62 if (d_is_dir(dentry) && ofs->upper_mnt &&
63 OVL_E(dentry)->lowerstack[0].layer->idx > 1)
64 return false;
65
66 /* Decoding a non-indexed upper from origin is not implemented */
67 if (ovl_dentry_upper(dentry) && 200 if (ovl_dentry_upper(dentry) &&
68 !ovl_test_flag(OVL_INDEX, d_inode(dentry))) 201 !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
69 return false;
70
71 return true;
72}
73
74static int ovl_encode_maybe_copy_up(struct dentry *dentry)
75{
76 int err;
77
78 if (ovl_dentry_upper(dentry))
79 return 0; 202 return 0;
80 203
81 err = ovl_want_write(dentry); 204 /*
82 if (err) 205 * Decoding a merge dir, whose origin's ancestor is under a redirected
83 return err; 206 * lower dir or under a non-indexed upper is not always possible.
84 207 * ovl_connect_layer() will try to make origin's layer "connected" by
85 err = ovl_copy_up(dentry); 208 * copying up a "connectable" ancestor.
209 */
210 if (d_is_dir(dentry) && ofs->upper_mnt)
211 return ovl_connect_layer(dentry);
86 212
87 ovl_drop_write(dentry); 213 /* Lower file handle for indexed and non-upper dir/non-dir */
88 return err; 214 return 1;
89} 215}
90 216
91static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen) 217static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
92{ 218{
93 struct dentry *origin = ovl_dentry_lower(dentry);
94 struct ovl_fh *fh = NULL; 219 struct ovl_fh *fh = NULL;
95 int err; 220 int err, enc_lower;
96 221
97 /* 222 /*
98 * If we should not encode a lower dir file handle, copy up and encode 223 * Check if we should encode a lower or upper file handle and maybe
99 * an upper dir file handle. 224 * copy up an ancestor to make lower file handle connectable.
100 */ 225 */
101 if (!ovl_should_encode_origin(dentry)) { 226 err = enc_lower = ovl_check_encode_origin(dentry);
102 err = ovl_encode_maybe_copy_up(dentry); 227 if (enc_lower < 0)
103 if (err) 228 goto fail;
104 goto fail;
105
106 origin = NULL;
107 }
108 229
109 /* Encode an upper or origin file handle */ 230 /* Encode an upper or lower file handle */
110 fh = ovl_encode_fh(origin ?: ovl_dentry_upper(dentry), !origin); 231 fh = ovl_encode_fh(enc_lower ? ovl_dentry_lower(dentry) :
232 ovl_dentry_upper(dentry), !enc_lower);
111 err = PTR_ERR(fh); 233 err = PTR_ERR(fh);
112 if (IS_ERR(fh)) 234 if (IS_ERR(fh))
113 goto fail; 235 goto fail;
@@ -355,8 +477,8 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb,
355 dput(upper); 477 dput(upper);
356 } 478 }
357 479
358 if (!this) 480 if (IS_ERR_OR_NULL(this))
359 return NULL; 481 return this;
360 482
361 if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) { 483 if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) {
362 dput(this); 484 dput(this);
@@ -498,7 +620,7 @@ static struct dentry *ovl_lookup_real(struct super_block *sb,
498 if (err == -ECHILD) { 620 if (err == -ECHILD) {
499 this = ovl_lookup_real_ancestor(sb, real, 621 this = ovl_lookup_real_ancestor(sb, real,
500 layer); 622 layer);
501 err = IS_ERR(this) ? PTR_ERR(this) : 0; 623 err = PTR_ERR_OR_ZERO(this);
502 } 624 }
503 if (!err) { 625 if (!err) {
504 dput(connected); 626 dput(connected);
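ovl_connect_layer() above walks toward the root with dget_parent(), copying up the first ancestor that is still connectable when it meets one that is not. Stripped of the overlayfs specifics, the reference counting of such a walk looks like this (is_anchor() is a hypothetical stand-in for the connected/indexed tests):

	static int sketch_walk_to_anchor(struct dentry *dentry)
	{
		struct dentry *next = dget(dentry), *parent;
		int err = 0;

		for (;;) {
			parent = dget_parent(next);
			if (parent == next) {	/* hit the root unexpectedly */
				err = -EIO;
				break;
			}
			if (is_anchor(parent))	/* hypothetical predicate */
				break;
			dput(next);		/* drop ref before stepping up */
			next = parent;
		}
		dput(parent);
		dput(next);
		return err;
	}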
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index fcd97b783fa1..3b1bd469accd 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -669,38 +669,59 @@ struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
669 return inode; 669 return inode;
670} 670}
671 671
672/*
673 * Does overlay inode need to be hashed by lower inode?
674 */
675static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
676 struct dentry *lower, struct dentry *index)
677{
678 struct ovl_fs *ofs = sb->s_fs_info;
679
680 /* No, if pure upper */
681 if (!lower)
682 return false;
683
684 /* Yes, if already indexed */
685 if (index)
686 return true;
687
688 /* Yes, if won't be copied up */
689 if (!ofs->upper_mnt)
690 return true;
691
692 /* No, if lower hardlink is or will be broken on copy up */
693 if ((upper || !ovl_indexdir(sb)) &&
694 !d_is_dir(lower) && d_inode(lower)->i_nlink > 1)
695 return false;
696
697 /* No, if non-indexed upper with NFS export */
698 if (sb->s_export_op && upper)
699 return false;
700
701 /* Otherwise, hash by lower inode for fsnotify */
702 return true;
703}
704
672struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry, 705struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
673 struct dentry *lowerdentry, struct dentry *index, 706 struct dentry *lowerdentry, struct dentry *index,
674 unsigned int numlower) 707 unsigned int numlower)
675{ 708{
676 struct ovl_fs *ofs = sb->s_fs_info;
677 struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL; 709 struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
678 struct inode *inode; 710 struct inode *inode;
679 /* Already indexed or could be indexed on copy up? */ 711 bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, index);
680 bool indexed = (index || (ovl_indexdir(sb) && !upperdentry));
681 struct dentry *origin = indexed ? lowerdentry : NULL;
682 bool is_dir; 712 bool is_dir;
683 713
684 if (WARN_ON(upperdentry && indexed && !lowerdentry))
685 return ERR_PTR(-EIO);
686
687 if (!realinode) 714 if (!realinode)
688 realinode = d_inode(lowerdentry); 715 realinode = d_inode(lowerdentry);
689 716
690 /* 717 /*
691 * Copy up origin (lower) may exist for non-indexed non-dir upper, but 718 * Copy up origin (lower) may exist for non-indexed upper, but we must
692 * we must not use lower as hash key in that case. 719 * not use lower as hash key if this is a broken hardlink.
693 * Hash non-dir that is or could be indexed by origin inode.
694 * Hash dir that is or could be merged by origin inode.
695 * Hash pure upper and non-indexed non-dir by upper inode.
696 * Hash non-indexed dir by upper inode for NFS export.
697 */ 720 */
698 is_dir = S_ISDIR(realinode->i_mode); 721 is_dir = S_ISDIR(realinode->i_mode);
699 if (is_dir && (indexed || !sb->s_export_op || !ofs->upper_mnt)) 722 if (upperdentry || bylower) {
700 origin = lowerdentry; 723 struct inode *key = d_inode(bylower ? lowerdentry :
701 724 upperdentry);
702 if (upperdentry || origin) {
703 struct inode *key = d_inode(origin ?: upperdentry);
704 unsigned int nlink = is_dir ? 1 : realinode->i_nlink; 725 unsigned int nlink = is_dir ? 1 : realinode->i_nlink;
705 726
706 inode = iget5_locked(sb, (unsigned long) key, 727 inode = iget5_locked(sb, (unsigned long) key,
@@ -728,6 +749,7 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
728 nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink); 749 nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink);
729 set_nlink(inode, nlink); 750 set_nlink(inode, nlink);
730 } else { 751 } else {
752 /* Lower hardlink that will be broken on copy up */
731 inode = new_inode(sb); 753 inode = new_inode(sb);
732 if (!inode) 754 if (!inode)
733 goto out_nomem; 755 goto out_nomem;
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index de3e6da1d5a5..70fcfcc684cc 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -913,9 +913,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
913 stack[ctr].layer = lower.layer; 913 stack[ctr].layer = lower.layer;
914 ctr++; 914 ctr++;
915 915
916 if (d.stop)
917 break;
918
919 /* 916 /*
920 * Following redirects can have security consequences: it's like 917 * Following redirects can have security consequences: it's like
921 * a symlink into the lower layer without the permission checks. 918 * a symlink into the lower layer without the permission checks.
@@ -933,6 +930,9 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
933 goto out_put; 930 goto out_put;
934 } 931 }
935 932
933 if (d.stop)
934 break;
935
936 if (d.redirect && d.redirect[0] == '/' && poe != roe) { 936 if (d.redirect && d.redirect[0] == '/' && poe != roe) {
937 poe = roe; 937 poe = roe;
938 /* Find the current layer on the root dentry */ 938 /* Find the current layer on the root dentry */
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 0df25a9c94bd..225ff1171147 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -40,6 +40,7 @@ enum ovl_inode_flag {
40enum ovl_entry_flag { 40enum ovl_entry_flag {
41 OVL_E_UPPER_ALIAS, 41 OVL_E_UPPER_ALIAS,
42 OVL_E_OPAQUE, 42 OVL_E_OPAQUE,
43 OVL_E_CONNECTED,
43}; 44};
44 45
45/* 46/*
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 9ee37c76091d..7c24619ae7fc 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1359,6 +1359,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1359 1359
1360 /* Root is always merge -> can have whiteouts */ 1360 /* Root is always merge -> can have whiteouts */
1361 ovl_set_flag(OVL_WHITEOUTS, d_inode(root_dentry)); 1361 ovl_set_flag(OVL_WHITEOUTS, d_inode(root_dentry));
1362 ovl_dentry_set_flag(OVL_E_CONNECTED, root_dentry);
1362 ovl_inode_init(d_inode(root_dentry), upperpath.dentry, 1363 ovl_inode_init(d_inode(root_dentry), upperpath.dentry,
1363 ovl_dentry_lower(root_dentry)); 1364 ovl_dentry_lower(root_dentry));
1364 1365
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 8664db25a9a6..215c225b2ca1 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -106,6 +106,7 @@ int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target,
106{ 106{
107 return sysfs_do_create_link(kobj, target, name, 0); 107 return sysfs_do_create_link(kobj, target, name, 0);
108} 108}
109EXPORT_SYMBOL_GPL(sysfs_create_link_nowarn);
109 110
110/** 111/**
111 * sysfs_delete_link - remove symlink in object's directory. 112 * sysfs_delete_link - remove symlink in object's directory.
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 66e1edbfb2b2..046469fcc1b8 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -955,15 +955,29 @@ static inline bool imap_needs_alloc(struct inode *inode,
955 (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN); 955 (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
956} 956}
957 957
958static inline bool needs_cow_for_zeroing(struct xfs_bmbt_irec *imap, int nimaps)
959{
960 return nimaps &&
961 imap->br_startblock != HOLESTARTBLOCK &&
962 imap->br_state != XFS_EXT_UNWRITTEN;
963}
964
958static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags) 965static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
959{ 966{
960 /* 967 /*
961 * COW writes will allocate delalloc space, so we need to make sure 968 * COW writes may allocate delalloc space or convert unwritten COW
962 * to take the lock exclusively here. 969 * extents, so we need to make sure to take the lock exclusively here.
963 */ 970 */
964 if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) 971 if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
965 return true; 972 return true;
966 if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE)) 973
974 /*
975 * Extents not yet cached requires exclusive access, don't block.
976 * This is an opencoded xfs_ilock_data_map_shared() to cater for the
977 * non-blocking behaviour.
978 */
979 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
980 !(ip->i_df.if_flags & XFS_IFEXTENTS))
967 return true; 981 return true;
968 return false; 982 return false;
969} 983}
@@ -993,16 +1007,18 @@ xfs_file_iomap_begin(
993 return xfs_file_iomap_begin_delay(inode, offset, length, iomap); 1007 return xfs_file_iomap_begin_delay(inode, offset, length, iomap);
994 } 1008 }
995 1009
996 if (need_excl_ilock(ip, flags)) { 1010 if (need_excl_ilock(ip, flags))
997 lockmode = XFS_ILOCK_EXCL; 1011 lockmode = XFS_ILOCK_EXCL;
998 xfs_ilock(ip, XFS_ILOCK_EXCL); 1012 else
999 } else { 1013 lockmode = XFS_ILOCK_SHARED;
1000 lockmode = xfs_ilock_data_map_shared(ip);
1001 }
1002 1014
1003 if ((flags & IOMAP_NOWAIT) && !(ip->i_df.if_flags & XFS_IFEXTENTS)) { 1015 if (flags & IOMAP_NOWAIT) {
1004 error = -EAGAIN; 1016 if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
1005 goto out_unlock; 1017 return -EAGAIN;
1018 if (!xfs_ilock_nowait(ip, lockmode))
1019 return -EAGAIN;
1020 } else {
1021 xfs_ilock(ip, lockmode);
1006 } 1022 }
1007 1023
1008 ASSERT(offset <= mp->m_super->s_maxbytes); 1024 ASSERT(offset <= mp->m_super->s_maxbytes);
@@ -1024,7 +1040,9 @@ xfs_file_iomap_begin(
1024 goto out_unlock; 1040 goto out_unlock;
1025 } 1041 }
1026 1042
1027 if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) { 1043 if (xfs_is_reflink_inode(ip) &&
1044 ((flags & IOMAP_WRITE) ||
1045 ((flags & IOMAP_ZERO) && needs_cow_for_zeroing(&imap, nimaps)))) {
1028 if (flags & IOMAP_DIRECT) { 1046 if (flags & IOMAP_DIRECT) {
1029 /* 1047 /*
1030 * A reflinked inode will result in CoW alloc. 1048 * A reflinked inode will result in CoW alloc.
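The IOMAP_NOWAIT handling above follows the usual non-blocking discipline: any step that could sleep, reading in the extent list or waiting on the ilock, instead fails fast with -EAGAIN. A sketch of that shape (extents_already_cached() is a hypothetical stand-in for the XFS_IFEXTENTS test):

	static int sketch_lock_for_iomap(struct xfs_inode *ip, unsigned flags,
					 uint lockmode)
	{
		if (flags & IOMAP_NOWAIT) {
			if (!extents_already_cached(ip))	/* hypothetical */
				return -EAGAIN;		/* would block on I/O  */
			if (!xfs_ilock_nowait(ip, lockmode))
				return -EAGAIN;		/* lock contended      */
		} else {
			xfs_ilock(ip, lockmode);	/* may sleep           */
		}
		return 0;
	}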
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2cfa3075d148..bfbb44a5ad38 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -983,6 +983,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
983int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); 983int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
984int pud_clear_huge(pud_t *pud); 984int pud_clear_huge(pud_t *pud);
985int pmd_clear_huge(pmd_t *pmd); 985int pmd_clear_huge(pmd_t *pmd);
986int pud_free_pmd_page(pud_t *pud);
987int pmd_free_pte_page(pmd_t *pmd);
986#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ 988#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
987static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) 989static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
988{ 990{
@@ -1008,6 +1010,14 @@ static inline int pmd_clear_huge(pmd_t *pmd)
1008{ 1010{
1009 return 0; 1011 return 0;
1010} 1012}
1013static inline int pud_free_pmd_page(pud_t *pud)
1014{
1015 return 0;
1016}
1017static inline int pmd_free_pte_page(pmd_t *pmd)
1018{
1019 return 0;
1020}
1011#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 1021#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
1012 1022
1013#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE 1023#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index cdbd142ca7f2..02924ae2527e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -360,6 +360,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
360bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); 360bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
361void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); 361void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
362void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); 362void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
363void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
363 364
364void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); 365void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
365 366
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 9f242b876fde..f8e76d01a5ad 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -755,13 +755,13 @@ struct sock_cgroup_data {
  * updaters and return part of the previous pointer as the prioidx or
  * classid. Such races are short-lived and the result isn't critical.
  */
-static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd)
+static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
 {
 	/* fallback to 1 which is always the ID of the root cgroup */
 	return (skcd->is_data & 1) ? skcd->prioidx : 1;
 }
 
-static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd)
+static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
 {
 	/* fallback to 0 which is the unconfigured default classid */
 	return (skcd->is_data & 1) ? skcd->classid : 0;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 8a9643857c4a..16c3027074a2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -17,6 +17,7 @@
 #include <linux/if.h>
 #include <linux/fs.h>
 #include <linux/aio_abi.h>	/* for aio_context_t */
+#include <linux/uaccess.h>
 #include <linux/unistd.h>
 
 #include <asm/compat.h>
@@ -229,13 +230,13 @@ typedef struct compat_siginfo {
 			short int _addr_lsb;	/* Valid LSB of the reported address. */
 			/* used when si_code=SEGV_BNDERR */
 			struct {
-				short _dummy_bnd;
+				compat_uptr_t _dummy_bnd;
 				compat_uptr_t _lower;
 				compat_uptr_t _upper;
 			} _addr_bnd;
 			/* used when si_code=SEGV_PKUERR */
 			struct {
-				short _dummy_pkey;
+				compat_uptr_t _dummy_pkey;
 				u32 _pkey;
 			} _addr_pkey;
 		};
@@ -550,8 +551,29 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
 asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
 
 extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
-extern int put_compat_sigset(compat_sigset_t __user *compat,
-		const sigset_t *set, unsigned int size);
+
+/*
+ * Defined inline such that size can be compile time constant, which avoids
+ * CONFIG_HARDENED_USERCOPY complaining about copies from task_struct
+ */
+static inline int
+put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
+		  unsigned int size)
+{
+	/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+	switch (_NSIG_WORDS) {
+	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+	}
+	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
+#else
+	return copy_to_user(compat, set, size) ? -EFAULT : 0;
+#endif
+}
 
 asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
 		compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
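The put_compat_sigset() move above keeps the copy size a compile-time constant at every call site, which is what the HARDENED_USERCOPY check wants to see. On big-endian targets it also splits each 64-bit sigset word into two 32-bit compat words, low half first. A user-space model of that split (hypothetical names, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Model of one step of the _NSIG_WORDS switch above. */
	static void split_sigset_word(uint64_t word, uint32_t out[2])
	{
		out[0] = (uint32_t)word;		/* v.sig[0] = set->sig[0];       */
		out[1] = (uint32_t)(word >> 32);	/* v.sig[1] = set->sig[0] >> 32; */
	}

	int main(void)
	{
		uint32_t v[2];

		split_sigset_word(0x1122334455667788ULL, v);
		printf("%08x %08x\n", v[0], v[1]);	/* prints: 55667788 11223344 */
		return 0;
	}
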
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 79c413985305..c6baf767619e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1317,6 +1317,7 @@ extern int send_sigurg(struct fown_struct *fown);
 #define SB_I_CGROUPWB	0x00000001	/* cgroup-aware writeback enabled */
 #define SB_I_NOEXEC	0x00000002	/* Ignore executables on this fs */
 #define SB_I_NODEV	0x00000004	/* Ignore devices on this fs */
+#define SB_I_MULTIROOT	0x00000008	/* Multiple roots to the dentry tree */
 
 /* sb->s_iflags to limit user namespace mounts */
 #define SB_I_USERNS_VISIBLE	0x00000010 /* fstype already mounted */
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index c5b0a75a7812..fd00170b494f 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -25,6 +25,7 @@ struct ptr_ring *tun_get_tx_ring(struct file *file);
 bool tun_is_xdp_buff(void *ptr);
 void *tun_xdp_to_ptr(void *ptr);
 void *tun_ptr_to_xdp(void *ptr);
+void tun_ptr_free(void *ptr);
 #else
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -50,5 +51,8 @@ static inline void *tun_ptr_to_xdp(void *ptr)
 {
 	return NULL;
 }
+static inline void tun_ptr_free(void *ptr)
+{
+}
 #endif /* CONFIG_TUN */
 #endif /* __IF_TUN_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 5e6a2d4dc366..c4a1cff9c768 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -300,30 +300,34 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
 }
 
 /**
- * __vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_inner_tag - inner VLAN tag inserting
  * @skb: skbuff to tag
  * @vlan_proto: VLAN encapsulation protocol
  * @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
  *
- * Inserts the VLAN tag into @skb as part of the payload
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
  * Returns error if skb_cow_head failes.
  *
  * Does not change skb->protocol so this function can be used during receive.
  */
-static inline int __vlan_insert_tag(struct sk_buff *skb,
-				    __be16 vlan_proto, u16 vlan_tci)
+static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
+					  __be16 vlan_proto, u16 vlan_tci,
+					  unsigned int mac_len)
 {
 	struct vlan_ethhdr *veth;
 
 	if (skb_cow_head(skb, VLAN_HLEN) < 0)
 		return -ENOMEM;
 
-	veth = skb_push(skb, VLAN_HLEN);
+	skb_push(skb, VLAN_HLEN);
 
-	/* Move the mac addresses to the beginning of the new header. */
-	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+	/* Move the mac header sans proto to the beginning of the new header. */
+	memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
 	skb->mac_header -= VLAN_HLEN;
 
+	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
+
 	/* first, the ethernet type */
 	veth->h_vlan_proto = vlan_proto;
 
@@ -334,12 +338,30 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
 }
 
 /**
- * vlan_insert_tag - regular VLAN tag inserting
+ * __vlan_insert_tag - regular VLAN tag inserting
  * @skb: skbuff to tag
  * @vlan_proto: VLAN encapsulation protocol
  * @vlan_tci: VLAN TCI to insert
  *
  * Inserts the VLAN tag into @skb as part of the payload
+ * Returns error if skb_cow_head failes.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+				    __be16 vlan_proto, u16 vlan_tci)
+{
+	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
+ * vlan_insert_inner_tag - inner VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ * @mac_len: MAC header length including outer vlan headers
+ *
+ * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
  * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
  *
  * Following the skb_unshare() example, in case of error, the calling function
@@ -347,12 +369,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
  *
  * Does not change skb->protocol so this function can be used during receive.
  */
-static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
-					      __be16 vlan_proto, u16 vlan_tci)
+static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
+						    __be16 vlan_proto,
+						    u16 vlan_tci,
+						    unsigned int mac_len)
 {
 	int err;
 
-	err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+	err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
 	if (err) {
 		dev_kfree_skb_any(skb);
 		return NULL;
@@ -361,6 +385,26 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
 }
 
 /**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+					      __be16 vlan_proto, u16 vlan_tci)
+{
+	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
+}
+
+/**
  * vlan_insert_tag_set_proto - regular VLAN tag inserting
  * @skb: skbuff to tag
  * @vlan_proto: VLAN encapsulation protocol
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c00c4c33e432..b26eccc78fb1 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -503,6 +503,7 @@
 
 #define ICH_HCR_EN			(1 << 0)
 #define ICH_HCR_UIE			(1 << 1)
+#define ICH_HCR_NPIE			(1 << 3)
 #define ICH_HCR_TC			(1 << 10)
 #define ICH_HCR_TALL0			(1 << 11)
 #define ICH_HCR_TALL1			(1 << 12)
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index d3453ee072fc..68d8b1f73682 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -84,6 +84,7 @@
 
 #define GICH_HCR_EN			(1 << 0)
 #define GICH_HCR_UIE			(1 << 1)
+#define GICH_HCR_NPIE			(1 << 3)
 
 #define GICH_LR_VIRTUALID		(0x3ff << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT	(10)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 8be5077efb5f..f92ea7783652 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -187,7 +187,6 @@ int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
 			  unsigned long *end_pfn);
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
 			  unsigned long *out_end_pfn, int *out_nid);
-unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);
 
 /**
  * for_each_mem_pfn_range - early memory pfn range iterator
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 4814cad7456e..cded85ab6fe4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -462,8 +462,8 @@ struct mlx5_core_srq {
 	struct mlx5_core_rsc_common	common; /* must be first */
 	u32		srqn;
 	int		max;
-	int		max_gs;
-	int		max_avail_gather;
+	size_t		max_gs;
+	size_t		max_avail_gather;
 	int		wqe_shift;
 	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);
 
diff --git a/include/linux/net.h b/include/linux/net.h
index 000d1aada74f..2248a052061d 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -222,6 +222,7 @@ enum {
 int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
 int sock_register(const struct net_proto_family *fam);
 void sock_unregister(int family);
+bool sock_is_registered(int family);
 int __sock_create(struct net *net, int family, int type, int proto,
 		  struct socket **res, int kern);
 int sock_create(int family, int type, int proto, struct socket **res);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 1313b35c3ab7..14529511c4b8 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -285,6 +285,8 @@ unsigned int *xt_alloc_entry_offsets(unsigned int size);
 bool xt_find_jump_offset(const unsigned int *offsets,
 			 unsigned int target, unsigned int size);
 
+int xt_check_proc_name(const char *name, unsigned int size);
+
 int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
 		   bool inv_proto);
 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 88865e0ebf4d..091033a6b836 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -13,7 +13,6 @@ struct device_node;
 struct device_node *of_pci_find_child_device(struct device_node *parent,
 					     unsigned int devfn);
 int of_pci_get_devfn(struct device_node *np);
-int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
 int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
 int of_get_pci_domain_nr(struct device_node *node);
 int of_pci_get_max_link_speed(struct device_node *node);
@@ -34,12 +33,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
 }
 
 static inline int
-of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-	return 0;
-}
-
-static inline int
 of_pci_parse_bus_range(struct device_node *node, struct resource *res)
 {
 	return -EINVAL;
@@ -67,6 +60,16 @@ of_pci_get_max_link_speed(struct device_node *node)
 static inline void of_pci_check_probe_only(void) { }
 #endif
 
+#if IS_ENABLED(CONFIG_OF_IRQ)
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
+#else
+static inline int
+of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	return 0;
+}
+#endif
+
 #if defined(CONFIG_OF_ADDRESS)
 int of_pci_get_host_bridge_resources(struct device_node *dev,
 			unsigned char busno, unsigned char bus_max,
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 864d167a1073..009cdf3d65b6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -30,10 +30,14 @@
  * calls io_destroy() or the process exits.
  *
  * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
- * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
- * the kioctx from the proccess's list of kioctxs - after that, there can't be
- * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
- * the initial ref with percpu_ref_put().
+ * removes the kioctx from the proccess's table of kioctxs and kills percpu_ref.
+ * After that, there can't be any new users of the kioctx (from lookup_ioctx())
+ * and it's then safe to drop the initial ref with percpu_ref_put().
+ *
+ * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
+ * to synchronize with RCU protected lookup_ioctx().  percpu_ref operations don't
+ * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
+ * with RCU protection, it must be done explicitly.
  *
  * Code that does a two stage shutdown like this often needs some kind of
  * explicit synchronization to ensure the initial refcount can only be dropped
@@ -113,8 +117,10 @@ void percpu_ref_reinit(struct percpu_ref *ref);
  * Must be used to drop the initial ref on a percpu refcount; must be called
  * precisely once before shutdown.
  *
- * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
- * percpu counters and dropping the initial ref.
+ * Switches @ref into atomic mode before gathering up the percpu counters
+ * and dropping the initial ref.
+ *
+ * There are no implied RCU grace periods between kill and release.
  */
 static inline void percpu_ref_kill(struct percpu_ref *ref)
 {
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 5a9b1753fdc5..f0b5870a6d40 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -984,6 +984,10 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
 {
 	return 0;
 }
+int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
+				u16 regnum);
+int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
+				 u16 regnum, u16 val);
 
 /* Clause 45 PHY */
 int genphy_c45_restart_aneg(struct phy_device *phydev);
@@ -1020,7 +1024,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
 int phy_drivers_register(struct phy_driver *new_driver, int n,
 			 struct module *owner);
 void phy_state_machine(struct work_struct *work);
-void phy_change(struct phy_device *phydev);
 void phy_change_work(struct work_struct *work);
 void phy_mac_interrupt(struct phy_device *phydev);
 void phy_start_machine(struct phy_device *phydev);
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index c9df2527e0cd..668a21f04b09 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -766,8 +766,10 @@ slow_path:
 		if (!key ||
 		    (params.obj_cmpfn ?
 		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
-		     rhashtable_compare(&arg, rht_obj(ht, head))))
+		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
+			pprev = &head->next;
 			continue;
+		}
 
 		data = rht_obj(ht, head);
 
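The rhashtable fix above makes the slow path keep pprev pointing at the link field of the last node it skipped, so a later unlink splices the right element out. The same idiom on a plain singly linked list (standalone sketch, not from the patch):

	#include <stdlib.h>

	struct node { int key; struct node *next; };

	/* Unlink and free the first node matching @key; 0 on success, -1 if absent. */
	static int list_remove(struct node **head, int key)
	{
		struct node **pprev = head;
		struct node *n;

		for (n = *head; n; n = n->next) {
			if (n->key != key) {
				pprev = &n->next;	/* advance past the skipped node */
				continue;
			}
			*pprev = n->next;		/* splice the match out */
			free(n);
			return 0;
		}
		return -1;
	}
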
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d8340e6e8814..47082f54ec1f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4040,6 +4040,12 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb)
 	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
 }
 
+/* Note: Should be called only if skb_is_gso(skb) is true */
+static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
+}
+
 static inline void skb_gso_reset(struct sk_buff *skb)
 {
 	skb_shinfo(skb)->gso_size = 0;
@@ -4047,6 +4053,22 @@ static inline void skb_gso_reset(struct sk_buff *skb)
 	skb_shinfo(skb)->gso_type = 0;
 }
 
+static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
+					 u16 increment)
+{
+	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
+		return;
+	shinfo->gso_size += increment;
+}
+
+static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
+					 u16 decrement)
+{
+	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
+		return;
+	shinfo->gso_size -= decrement;
+}
+
 void __skb_warn_lro_forwarding(const struct sk_buff *skb);
 
 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 0a6c71e0ad01..47f8af22f216 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -364,6 +364,7 @@ struct tty_file_private {
 #define TTY_PTY_LOCK		16	/* pty private */
 #define TTY_NO_WRITE_SPLIT	17	/* Preserve write boundaries to driver */
 #define TTY_HUPPED		18	/* Post driver->hangup() */
+#define TTY_HUPPING		19	/* Hangup in progress */
 #define TTY_LDISC_HALTED	22	/* Line discipline is halted */
 
 /* Values for tty->flow_change */
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 5bdbd9f49395..07ee0f84a46c 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -90,6 +90,28 @@ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 #endif
 }
 
+static inline unsigned long
+u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+{
+	unsigned long flags = 0;
+
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	local_irq_save(flags);
+	write_seqcount_begin(&syncp->seq);
+#endif
+	return flags;
+}
+
+static inline void
+u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+				unsigned long flags)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	write_seqcount_end(&syncp->seq);
+	local_irq_restore(flags);
+#endif
+}
+
 static inline void u64_stats_update_begin_raw(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
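The new u64_stats_update_begin_irqsave()/u64_stats_update_end_irqrestore() pair lets a writer that can run in both task and hard-irq context protect 64-bit counters on 32-bit SMP without open-coding local_irq_save(). A plausible caller, sketched against the common pcpu_sw_netstats layout (illustrative only, not from this merge):

	#include <linux/netdevice.h>
	#include <linux/u64_stats_sync.h>

	static void demo_rx_account(struct pcpu_sw_netstats *stats, unsigned int len)
	{
		unsigned long flags;

		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += len;
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
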
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index f1fcec2fd5f8..b7a99ce56bc9 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -63,4 +63,7 @@
  */
 #define USB_QUIRK_DISCONNECT_SUSPEND		BIT(12)
 
+/* Device needs a pause after every control message. */
+#define USB_QUIRK_DELAY_CTRL_MSG		BIT(13)
+
 #endif  /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index bc0cda180c8b..0c3301421c57 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -456,7 +456,6 @@ extern int schedule_on_each_cpu(work_func_t func);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern bool flush_work(struct work_struct *work);
-extern bool cancel_work(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
diff --git a/include/net/ip.h b/include/net/ip.h
index d53b5a9eae34..36f8f7811093 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -337,6 +337,13 @@ int ip_decrease_ttl(struct iphdr *iph)
 	return --iph->ttl;
 }
 
+static inline int ip_mtu_locked(const struct dst_entry *dst)
+{
+	const struct rtable *rt = (const struct rtable *)dst;
+
+	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
+}
+
 static inline
 int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
 {
@@ -344,7 +351,7 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
 
 	return pmtudisc == IP_PMTUDISC_DO ||
 	       (pmtudisc == IP_PMTUDISC_WANT &&
-		!(dst_metric_locked(dst, RTAX_MTU)));
+		!ip_mtu_locked(dst));
 }
 
 static inline bool ip_sk_accept_pmtu(const struct sock *sk)
@@ -370,7 +377,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
 	struct net *net = dev_net(dst->dev);
 
 	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
-	    dst_metric_locked(dst, RTAX_MTU) ||
+	    ip_mtu_locked(dst) ||
 	    !forwarding)
 		return dst_mtu(dst);
 
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index ce2abc0ff102..0084013d6bed 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -183,6 +183,9 @@ void rt6_disable_ip(struct net_device *dev, unsigned long event);
 void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
 void rt6_multipath_rebalance(struct rt6_info *rt);
 
+void rt6_uncached_list_add(struct rt6_info *rt);
+void rt6_uncached_list_del(struct rt6_info *rt);
+
 static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
 {
 	const struct dst_entry *dst = skb_dst(skb);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 7c7522e8585b..81d0f2107ff1 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -59,6 +59,7 @@ struct fib_nh_exception {
 	int				fnhe_genid;
 	__be32				fnhe_daddr;
 	u32				fnhe_pmtu;
+	bool				fnhe_mtu_locked;
 	__be32				fnhe_gw;
 	unsigned long			fnhe_expires;
 	struct rtable __rcu		*fnhe_rth_input;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 2fd59ed3be00..2449982daf75 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2077,6 +2077,9 @@ struct ieee80211_txq {
  *	virtual interface might not be given air time for the transmission of
  *	the frame, as it is not synced with the AP/P2P GO yet, and thus the
  *	deauthentication frame might not be transmitted.
+ *
+ * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
+ *	support QoS NDP for AP probing - that's most likely a driver bug.
  *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
@@ -2122,6 +2125,7 @@ enum ieee80211_hw_flags {
 	IEEE80211_HW_SUPPORTS_TX_FRAG,
 	IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
 	IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP,
+	IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP,
 
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
diff --git a/include/net/route.h b/include/net/route.h
index 158833ea7988..dbb032d5921b 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -63,7 +63,8 @@ struct rtable {
 	__be32			rt_gateway;
 
 	/* Miscellaneous cached information */
-	u32			rt_pmtu;
+	u32			rt_mtu_locked:1,
+				rt_pmtu:31;
 
 	struct list_head	rt_uncached;
 	struct uncached_list	*rt_uncached_list;
@@ -225,6 +226,9 @@ struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
 void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
 
+void rt_add_uncached_list(struct rtable *rt);
+void rt_del_uncached_list(struct rtable *rt);
+
 static inline void ip_rt_put(struct rtable *rt)
 {
 	/* dst_release() accepts a NULL parameter.
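Folding rt_mtu_locked into the same u32 as rt_pmtu keeps struct rtable from growing: the PMTU takes 31 bits and the lock flag takes the remaining bit. A standalone model of the packing (illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	struct mtu_info {
		uint32_t mtu_locked:1,	/* PMTU discovery must not override the MTU */
			 pmtu:31;	/* 31 bits is ample for any real path MTU */
	};

	int main(void)
	{
		struct mtu_info m = { .mtu_locked = 1, .pmtu = 1500 };

		/* Both fields share one 32-bit word: sizeof(m) == 4. */
		printf("size=%zu locked=%u pmtu=%u\n", sizeof(m), m.mtu_locked, m.pmtu);
		return 0;
	}
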
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d4907b584b38..493e311bbe93 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -824,6 +824,16 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
 	*to_free = skb;
 }
 
+static inline void __qdisc_drop_all(struct sk_buff *skb,
+				    struct sk_buff **to_free)
+{
+	if (skb->prev)
+		skb->prev->next = *to_free;
+	else
+		skb->next = *to_free;
+	*to_free = skb;
+}
+
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
 						   struct qdisc_skb_head *qh,
 						   struct sk_buff **to_free)
@@ -956,6 +966,15 @@ static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
 	return NET_XMIT_DROP;
 }
 
+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
+				 struct sk_buff **to_free)
+{
+	__qdisc_drop_all(skb, to_free);
+	qdisc_qstats_drop(sch);
+
+	return NET_XMIT_DROP;
+}
+
 /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
    long it will take to send a packet given its size.
  */
diff --git a/include/net/sock.h b/include/net/sock.h
index b7c75e024e37..709311132d4c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1138,6 +1138,7 @@ struct proto {
 
 int proto_register(struct proto *prot, int alloc_slab);
 void proto_unregister(struct proto *prot);
+int sock_load_diag_module(int family, int protocol);
 
 #ifdef SOCK_REFCNT_DEBUG
 static inline void sk_refcnt_debug_inc(struct sock *sk)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 73b2387e3f74..ff3ed435701f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1537,10 +1537,6 @@ struct ib_xrcd {
 
 	struct mutex		tgt_qp_mutex;
 	struct list_head	tgt_qp_list;
-	/*
-	 * Implementation details of the RDMA core, don't use in drivers:
-	 */
-	struct rdma_restrack_entry res;
 };
 
 struct ib_ah {
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index d8d4a902a88d..2280b2351739 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -68,6 +68,9 @@ struct scsi_cmnd {
 	struct list_head list;  /* scsi_cmnd participates in queue lists */
 	struct list_head eh_entry; /* entry for the host eh_cmd_q */
 	struct delayed_work abort_work;
+
+	struct rcu_head rcu;
+
 	int eh_eflags;		/* Used by error handlr */
 
 	/*
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 1a1df0d21ee3..a8b7bf879ced 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -571,8 +571,6 @@ struct Scsi_Host {
 		struct blk_mq_tag_set	tag_set;
 	};
 
-	struct rcu_head rcu;
-
 	atomic_t host_busy;		   /* commands actually active on low-level */
 	atomic_t host_blocked;
 
diff --git a/include/trace/events/mmc.h b/include/trace/events/mmc.h
index 200f731be557..7b706ff21335 100644
--- a/include/trace/events/mmc.h
+++ b/include/trace/events/mmc.h
@@ -86,8 +86,8 @@ TRACE_EVENT(mmc_request_start,
 		  __entry->stop_flags, __entry->stop_retries,
 		  __entry->sbc_opcode, __entry->sbc_arg,
 		  __entry->sbc_flags, __entry->sbc_retries,
-		  __entry->blocks, __entry->blk_addr,
-		  __entry->blksz, __entry->data_flags, __entry->tag,
+		  __entry->blocks, __entry->blksz,
+		  __entry->blk_addr, __entry->data_flags, __entry->tag,
 		  __entry->can_retune, __entry->doing_retune,
 		  __entry->retune_now, __entry->need_retune,
 		  __entry->hold_retune, __entry->retune_period)
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index 85dc965afd89..99c902e460c2 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -102,13 +102,13 @@ typedef struct siginfo {
 			short _addr_lsb; /* LSB of the reported address */
 			/* used when si_code=SEGV_BNDERR */
 			struct {
-				short _dummy_bnd;
+				void *_dummy_bnd;
 				void __user *_lower;
 				void __user *_upper;
 			} _addr_bnd;
 			/* used when si_code=SEGV_PKUERR */
 			struct {
-				short _dummy_pkey;
+				void *_dummy_pkey;
 				__u32 _pkey;
 			} _addr_pkey;
 		};
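Widening the _dummy_* members from short to pointer size changes where the member after them lands: the dummy stands in for the pointer-sized si_addr slot, so the payload fields keep the offsets userspace headers expect. A standalone offsetof() model of the effect (hypothetical struct names, not kernel code):

	#include <stddef.h>
	#include <stdio.h>

	struct pkey_short { short dummy; unsigned int pkey; };
	struct pkey_ptr   { void *dummy; unsigned int pkey; };

	int main(void)
	{
		/* Typically prints 4 then 8 on an LP64 target. */
		printf("short dummy: pkey at %zu\n", offsetof(struct pkey_short, pkey));
		printf("ptr dummy:   pkey at %zu\n", offsetof(struct pkey_ptr, pkey));
		return 0;
	}
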
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 2e4a6c1accaa..3a45b4ad71a3 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -30,6 +30,7 @@
  */
 
 #define ETH_ALEN	6		/* Octets in one ethernet addr	 */
+#define ETH_TLEN	2		/* Octets in ethernet type field */
 #define ETH_HLEN	14		/* Total octets in header.	 */
 #define ETH_ZLEN	60		/* Min. octets in frame sans FCS */
 #define ETH_DATA_LEN	1500		/* Max. octets in payload	 */
diff --git a/include/uapi/misc/ocxl.h b/include/uapi/misc/ocxl.h
index 4b0b0b756f3e..0af83d80fb3e 100644
--- a/include/uapi/misc/ocxl.h
+++ b/include/uapi/misc/ocxl.h
@@ -32,6 +32,22 @@ struct ocxl_ioctl_attach {
 	__u64 reserved3;
 };
 
+struct ocxl_ioctl_metadata {
+	__u16 version; // struct version, always backwards compatible
+
+	// Version 0 fields
+	__u8  afu_version_major;
+	__u8  afu_version_minor;
+	__u32 pasid;		// PASID assigned to the current context
+
+	__u64 pp_mmio_size;	// Per PASID MMIO size
+	__u64 global_mmio_size;
+
+	// End version 0 fields
+
+	__u64 reserved[13];	// Total of 16*u64
+};
+
 struct ocxl_ioctl_irq_fd {
 	__u64 irq_offset;
 	__s32 eventfd;
@@ -45,5 +61,6 @@ struct ocxl_ioctl_irq_fd {
 #define OCXL_IOCTL_IRQ_ALLOC	_IOR(OCXL_MAGIC, 0x11, __u64)
 #define OCXL_IOCTL_IRQ_FREE	_IOW(OCXL_MAGIC, 0x12, __u64)
 #define OCXL_IOCTL_IRQ_SET_FD	_IOW(OCXL_MAGIC, 0x13, struct ocxl_ioctl_irq_fd)
+#define OCXL_IOCTL_GET_METADATA _IOR(OCXL_MAGIC, 0x14, struct ocxl_ioctl_metadata)
 
 #endif /* _UAPI_MISC_OCXL_H */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3aeb4ea2a93a..dd172ee16716 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1849,7 +1849,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	union bpf_attr attr = {};
 	int err;
 
-	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
+	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	err = check_uarg_tail_zero(uattr, sizeof(attr), size);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 8cda3bc3ae22..4bfb2908ec15 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3183,6 +3183,16 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
 	if (cgroup_is_threaded(cgrp))
 		return 0;
 
+	/*
+	 * If @cgroup is populated or has domain controllers enabled, it
+	 * can't be switched.  While the below cgroup_can_be_thread_root()
+	 * test can catch the same conditions, that's only when @parent is
+	 * not mixable, so let's check it explicitly.
+	 */
+	if (cgroup_is_populated(cgrp) ||
+	    cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
+		return -EOPNOTSUPP;
+
 	/* we're joining the parent's domain, ensure its validity */
 	if (!cgroup_is_valid_domain(dom_cgrp) ||
 	    !cgroup_can_be_thread_root(dom_cgrp))
diff --git a/kernel/compat.c b/kernel/compat.c
index 3247fe761f60..3f5fa8902e7d 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -488,25 +488,6 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat)
 }
 EXPORT_SYMBOL_GPL(get_compat_sigset);
 
-int
-put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
-		  unsigned int size)
-{
-	/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
-	compat_sigset_t v;
-	switch (_NSIG_WORDS) {
-	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
-	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
-	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
-	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
-	}
-	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
-#else
-	return copy_to_user(compat, set, size) ? -EFAULT : 0;
-#endif
-}
-
 #ifdef CONFIG_NUMA
 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
 		       compat_uptr_t __user *, pages32,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 96db9ae5d5af..4b838470fac4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2246,7 +2246,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 			struct perf_event_context *task_ctx,
 			enum event_type_t event_type)
 {
-	enum event_type_t ctx_event_type = event_type & EVENT_ALL;
+	enum event_type_t ctx_event_type;
 	bool cpu_event = !!(event_type & EVENT_CPU);
 
 	/*
@@ -2256,6 +2256,8 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	if (event_type & EVENT_PINNED)
 		event_type |= EVENT_FLEXIBLE;
 
+	ctx_event_type = event_type & EVENT_ALL;
+
 	perf_pmu_disable(cpuctx->ctx.pmu);
 	if (task_ctx)
 		task_ctx_sched_out(cpuctx, task_ctx, event_type);
diff --git a/kernel/fail_function.c b/kernel/fail_function.c
index 21b0122cb39c..1d5632d8bbcc 100644
--- a/kernel/fail_function.c
+++ b/kernel/fail_function.c
@@ -14,6 +14,15 @@
 
 static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
 
+static void fei_post_handler(struct kprobe *kp, struct pt_regs *regs,
+			     unsigned long flags)
+{
+	/*
+	 * A dummy post handler is required to prohibit optimizing, because
+	 * jump optimization does not support execution path overriding.
+	 */
+}
+
 struct fei_attr {
 	struct list_head list;
 	struct kprobe kp;
@@ -56,6 +65,7 @@ static struct fei_attr *fei_attr_new(const char *sym, unsigned long addr)
 		return NULL;
 	}
 	attr->kp.pre_handler = fei_kprobe_handler;
+	attr->kp.post_handler = fei_post_handler;
 	attr->retval = adjust_error_retval(addr, 0);
 	INIT_LIST_HEAD(&attr->list);
 	}
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 52a0a7af8640..e7214093dcd1 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -373,7 +373,8 @@ static void __jump_label_update(struct static_key *key,
 		if (kernel_text_address(entry->code))
 			arch_jump_label_transform(entry, jump_label_type(entry));
 		else
-			WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
+			WARN_ONCE(1, "can't patch jump_label at %pS",
+				  (void *)(unsigned long)entry->code);
 		}
 	}
 }
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 65cc0cb984e6..940633c63254 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1616,11 +1616,12 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 	DEFINE_WAKE_Q(wake_q);
+	unsigned long flags;
 	bool postunlock;
 
-	raw_spin_lock_irq(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
-	raw_spin_unlock_irq(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	if (postunlock)
 		rt_mutex_postunlock(&wake_q);
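The rtmutex fix swaps the _irq lock variants for _irqsave/_irqrestore: the unlock path can now be entered with interrupts already disabled, because the saved flags word restores exactly the interrupt state the caller had, whereas raw_spin_unlock_irq() would have re-enabled interrupts unconditionally. The general pattern (sketch, not from the patch):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);

	static void demo_critical_section(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&demo_lock, flags);
		/* critical section */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
		/* interrupt state is exactly as it was on entry */
	}
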
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 4dd4274cabe2..895e6b76b25e 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -427,7 +427,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_pfn_remap:
  err_radix:
 	pgmap_radix_release(res, pgoff);
-	devres_free(pgmap);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL(devm_memremap_pages);
diff --git a/kernel/module.c b/kernel/module.c
index ad2d420024f6..e42764acedb4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4228,7 +4228,7 @@ static int modules_open(struct inode *inode, struct file *file)
 		m->private = kallsyms_show_value() ? NULL : (void *)8ul;
 	}
 
-	return 0;
+	return err;
 }
 
 static const struct file_operations proc_modules_operations = {
diff --git a/kernel/panic.c b/kernel/panic.c
index 2cfef408fec9..4b794f1d8561 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
  */
 __visible void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	panic("stack-protector: Kernel stack is corrupted in: %pB\n",
 	      __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e7c535eee0a6..c94895bc5a2c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6683,13 +6683,18 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
 		parent_quota = parent_b->hierarchical_quota;
 
 		/*
-		 * Ensure max(child_quota) <= parent_quota, inherit when no
+		 * Ensure max(child_quota) <= parent_quota.  On cgroup2,
+		 * always take the min.  On cgroup1, only inherit when no
 		 * limit is set:
 		 */
-		if (quota == RUNTIME_INF)
-			quota = parent_quota;
-		else if (parent_quota != RUNTIME_INF && quota > parent_quota)
-			return -EINVAL;
+		if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
+			quota = min(quota, parent_quota);
+		} else {
+			if (quota == RUNTIME_INF)
+				quota = parent_quota;
+			else if (parent_quota != RUNTIME_INF && quota > parent_quota)
+				return -EINVAL;
+		}
 	}
 	cfs_b->hierarchical_quota = quota;
 
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index c634e093951f..7f9691c86b6e 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -661,7 +661,41 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
+static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_perf_event_output_proto_tp;
+	case BPF_FUNC_get_stackid:
+		return &bpf_get_stackid_proto_tp;
+	default:
+		return tracing_func_proto(func_id);
+	}
+}
+
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+				    struct bpf_insn_access_aux *info)
+{
+	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
+		return false;
+	if (type != BPF_READ)
+		return false;
+	if (off % size != 0)
+		return false;
+
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
+	return true;
+}
+
+const struct bpf_verifier_ops tracepoint_verifier_ops = {
+	.get_func_proto  = tp_prog_func_proto,
+	.is_valid_access = tp_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops tracepoint_prog_ops = {
+};
+
+BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
 	   struct bpf_perf_event_value *, buf, u32, size)
 {
 	int err = -EINVAL;
@@ -678,8 +712,8 @@ clear:
 	return err;
 }
 
-static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
-	.func		= bpf_perf_prog_read_value_tp,
+static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
+	.func		= bpf_perf_prog_read_value,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
@@ -687,7 +721,7 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
 	.arg3_type	= ARG_CONST_SIZE,
 };
 
-static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
 	case BPF_FUNC_perf_event_output:
@@ -695,34 +729,12 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_tp;
 	case BPF_FUNC_perf_prog_read_value:
-		return &bpf_perf_prog_read_value_proto_tp;
+		return &bpf_perf_prog_read_value_proto;
 	default:
 		return tracing_func_proto(func_id);
 	}
 }
 
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    struct bpf_insn_access_aux *info)
-{
-	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
-		return false;
-	if (type != BPF_READ)
-		return false;
-	if (off % size != 0)
-		return false;
-
-	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
-	return true;
-}
-
-const struct bpf_verifier_ops tracepoint_verifier_ops = {
-	.get_func_proto  = tp_prog_func_proto,
-	.is_valid_access = tp_prog_is_valid_access,
-};
-
-const struct bpf_prog_ops tracepoint_prog_ops = {
-};
-
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 				    struct bpf_insn_access_aux *info)
 {
@@ -791,7 +803,7 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
 }
 
 const struct bpf_verifier_ops perf_event_verifier_ops = {
-	.get_func_proto		= tp_prog_func_proto,
+	.get_func_proto		= pe_prog_func_proto,
 	.is_valid_access	= pe_prog_is_valid_access,
 	.convert_ctx_access	= pe_prog_convert_ctx_access,
 };
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bb9a519cbf50..6ec6ba65127b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3018,14 +3018,6 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
 	return ret;
 }
 
-/*
- * See cancel_delayed_work()
- */
-bool cancel_work(struct work_struct *work)
-{
-	return __cancel_work(work, false);
-}
-
 /**
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
@@ -5337,7 +5329,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 
 	ret = device_register(&wq_dev->dev);
 	if (ret) {
-		kfree(wq_dev);
+		put_device(&wq_dev->dev);
 		wq->wq_dev = NULL;
 		return ret;
 	}
diff --git a/lib/btree.c b/lib/btree.c
index f93a945274af..590facba2c50 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -3,7 +3,7 @@
  *
  * As should be obvious for Linux kernel code, license is GPLv2
  *
- * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
+ * Copyright (c) 2007-2008 Joern Engel <joern@purestorage.com>
  * Bits and pieces stolen from Peter Zijlstra's code, which is
  * Copyright 2007, Red Hat Inc. Peter Zijlstra
  * GPLv2
@@ -76,6 +76,8 @@ struct btree_geo btree_geo128 = {
 };
 EXPORT_SYMBOL_GPL(btree_geo128);
 
+#define MAX_KEYLEN	(2 * LONG_PER_U64)
+
 static struct kmem_cache *btree_cachep;
 
 void *btree_alloc(gfp_t gfp_mask, void *pool_data)
@@ -313,7 +315,7 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
 {
 	int i, height;
 	unsigned long *node, *oldnode;
-	unsigned long *retry_key = NULL, key[geo->keylen];
+	unsigned long *retry_key = NULL, key[MAX_KEYLEN];
 
 	if (keyzero(geo, __key))
 		return NULL;
@@ -639,8 +641,8 @@ EXPORT_SYMBOL_GPL(btree_remove);
639int btree_merge(struct btree_head *target, struct btree_head *victim, 641int btree_merge(struct btree_head *target, struct btree_head *victim,
640 struct btree_geo *geo, gfp_t gfp) 642 struct btree_geo *geo, gfp_t gfp)
641{ 643{
642 unsigned long key[geo->keylen]; 644 unsigned long key[MAX_KEYLEN];
643 unsigned long dup[geo->keylen]; 645 unsigned long dup[MAX_KEYLEN];
644 void *val; 646 void *val;
645 int err; 647 int err;
646 648
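
Both btree hunks trade variable-length arrays such as key[geo->keylen] for a
fixed worst-case bound, in line with the kernel-wide move away from VLAs on
the stack. The shape of the transformation in plain C (the MAX_KEYLEN value
below is hypothetical):

    #include <string.h>

    #define MAX_KEYLEN 4    /* worst case across all supported geometries */

    static int copy_key(unsigned long *dst, const unsigned long *src, int keylen)
    {
            unsigned long key[MAX_KEYLEN];  /* fixed bound, no VLA */

            if (keylen < 0 || keylen > MAX_KEYLEN)
                    return -1;              /* reject anything past the bound */
            memcpy(key, src, keylen * sizeof(*key));
            memcpy(dst, key, keylen * sizeof(*key));
            return 0;
    }

    int main(void)
    {
            unsigned long in[2] = { 1, 2 }, out[2];

            return copy_key(out, in, 2);
    }

A fixed bound costs a few words of stack in the worst case but gives the
compiler a known frame size and removes the runtime-sized allocation.
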
diff --git a/lib/bug.c b/lib/bug.c
index c1b0fad31b10..1077366f496b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
                 return BUG_TRAP_TYPE_NONE;
 
         bug = find_bug(bugaddr);
+        if (!bug)
+                return BUG_TRAP_TYPE_NONE;
 
         file = NULL;
         line = 0;
@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
         if (file)
                 pr_crit("kernel BUG at %s:%u!\n", file, line);
         else
-                pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+                pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
                         (void *)bugaddr);
 
         return BUG_TRAP_TYPE_BUG;
diff --git a/lib/ioremap.c b/lib/ioremap.c
index b808a390e4c3..54e5bbaa3200 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -91,7 +91,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 
                 if (ioremap_pmd_enabled() &&
                     ((next - addr) == PMD_SIZE) &&
-                    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+                    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+                    pmd_free_pte_page(pmd)) {
                         if (pmd_set_huge(pmd, phys_addr + addr, prot))
                                 continue;
                 }
@@ -117,7 +118,8 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
 
                 if (ioremap_pud_enabled() &&
                     ((next - addr) == PUD_SIZE) &&
-                    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+                    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+                    pud_free_pmd_page(pud)) {
                         if (pud_set_huge(pud, phys_addr + addr, prot))
                                 continue;
                 }
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 30e7dd88148b..9f96fa7bc000 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -322,6 +322,8 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
  * This function normally doesn't block and can be called from any context
  * but it may block if @confirm_kill is specified and @ref is in the
  * process of switching to atomic mode by percpu_ref_switch_to_atomic().
+ *
+ * There are no implied RCU grace periods between kill and release.
  */
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                  percpu_ref_func_t *confirm_kill)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 3825c30aaa36..47de025b6245 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -506,8 +506,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
                 if (!key ||
                     (ht->p.obj_cmpfn ?
                      ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
-                     rhashtable_compare(&arg, rht_obj(ht, head))))
+                     rhashtable_compare(&arg, rht_obj(ht, head)))) {
+                        pprev = &head->next;
                         continue;
+                }
 
                 if (!ht->rhlist)
                         return rht_obj(ht, head);
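
The rhashtable fix adds pprev = &head->next whenever a non-matching element
is skipped; without it, a later unlink through pprev splices the chain at a
stale position. The invariant in a self-contained singly linked list walk:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int val; struct node *next; };

    static void remove_val(struct node **head, int val)
    {
            struct node **pprev = head;
            struct node *n = *head;

            while (n) {
                    if (n->val != val) {
                            pprev = &n->next;   /* the update the fix restores */
                            n = n->next;
                            continue;
                    }
                    *pprev = n->next;           /* unlink the first match */
                    free(n);
                    return;
            }
    }

    int main(void)
    {
            struct node *head = NULL;

            for (int v = 3; v >= 1; v--) {
                    struct node *n = malloc(sizeof(*n));

                    n->val = v;
                    n->next = head;
                    head = n;
            }
            remove_val(&head, 2);
            for (struct node *n = head; n; n = n->next)
                    printf("%d ", n->val);      /* prints: 1 3 */
            printf("\n");
            return 0;
    }
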
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 2efb213716fa..3e9335493fe4 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5467,7 +5467,7 @@ static struct bpf_test tests[] = {
         {
                 "BPF_MAXINSNS: Jump, gap, jump, ...",
                 { },
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
                 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 #else
                 CLASSIC | FLAG_NO_DATA,
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e372b97eee13..0e5b7a61460b 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1141,7 +1141,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
         mutex_lock(&reg_dev_mutex);
 
         /* int should suffice for number of devices, test for wrap */
-        if (unlikely(num_test_devs + 1) < 0) {
+        if (num_test_devs + 1 == INT_MAX) {
                 pr_err("reached limit of number of test devices\n");
                 goto out;
         }
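
The test_kmod change fixes a misplaced parenthesis: in the old code the
comparison applied to the 0-or-1 result of the unlikely() branch hint, so the
guard could never fire. A compilable demonstration using the GCC-style hint:

    #include <stdio.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)

    int main(void)
    {
            int num = -2;

            if (unlikely(num + 1) < 0)      /* !!(num + 1) is 0 or 1: never < 0 */
                    puts("misparenthesized guard fired");
            if (unlikely(num + 1 < 0))      /* comparison belongs inside the hint */
                    puts("correct guard fired");
            return 0;
    }

Only the second message prints. The replacement in the diff also drops the
hint and tests num_test_devs + 1 == INT_MAX, catching the limit before signed
overflow rather than after it.
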
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 76d3667fdea2..f4000c137dbe 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -79,6 +79,21 @@ struct thread_data {
         struct test_obj *objs;
 };
 
+static u32 my_hashfn(const void *data, u32 len, u32 seed)
+{
+        const struct test_obj_rhl *obj = data;
+
+        return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+}
+
+static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
+{
+        const struct test_obj_rhl *test_obj = obj;
+        const struct test_obj_val *val = arg->key;
+
+        return test_obj->value.id - val->id;
+}
+
 static struct rhashtable_params test_rht_params = {
         .head_offset = offsetof(struct test_obj, node),
         .key_offset = offsetof(struct test_obj, value),
@@ -87,6 +102,17 @@ static struct rhashtable_params test_rht_params = {
         .nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
+static struct rhashtable_params test_rht_params_dup = {
+        .head_offset = offsetof(struct test_obj_rhl, list_node),
+        .key_offset = offsetof(struct test_obj_rhl, value),
+        .key_len = sizeof(struct test_obj_val),
+        .hashfn = jhash,
+        .obj_hashfn = my_hashfn,
+        .obj_cmpfn = my_cmpfn,
+        .nelem_hint = 128,
+        .automatic_shrinking = false,
+};
+
 static struct semaphore prestart_sem;
 static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
 
@@ -465,6 +491,112 @@ static int __init test_rhashtable_max(struct test_obj *array,
         return err;
 }
 
+static unsigned int __init print_ht(struct rhltable *rhlt)
+{
+        struct rhashtable *ht;
+        const struct bucket_table *tbl;
+        char buff[512] = "";
+        unsigned int i, cnt = 0;
+
+        ht = &rhlt->ht;
+        tbl = rht_dereference(ht->tbl, ht);
+        for (i = 0; i < tbl->size; i++) {
+                struct rhash_head *pos, *next;
+                struct test_obj_rhl *p;
+
+                pos = rht_dereference(tbl->buckets[i], ht);
+                next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
+
+                if (!rht_is_a_nulls(pos)) {
+                        sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
+                }
+
+                while (!rht_is_a_nulls(pos)) {
+                        struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
+                        sprintf(buff, "%s[[", buff);
+                        do {
+                                pos = &list->rhead;
+                                list = rht_dereference(list->next, ht);
+                                p = rht_obj(ht, pos);
+
+                                sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
+                                        list? ", " : " ");
+                                cnt++;
+                        } while (list);
+
+                        pos = next,
+                        next = !rht_is_a_nulls(pos) ?
+                                rht_dereference(pos->next, ht) : NULL;
+
+                        sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
+                }
+        }
+        printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+
+        return cnt;
+}
+
+static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
+                                  int cnt, bool slow)
+{
+        struct rhltable rhlt;
+        unsigned int i, ret;
+        const char *key;
+        int err = 0;
+
+        err = rhltable_init(&rhlt, &test_rht_params_dup);
+        if (WARN_ON(err))
+                return err;
+
+        for (i = 0; i < cnt; i++) {
+                rhl_test_objects[i].value.tid = i;
+                key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+                key += test_rht_params_dup.key_offset;
+
+                if (slow) {
+                        err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+                                                             &rhl_test_objects[i].list_node.rhead));
+                        if (err == -EAGAIN)
+                                err = 0;
+                } else
+                        err = rhltable_insert(&rhlt,
+                                              &rhl_test_objects[i].list_node,
+                                              test_rht_params_dup);
+                if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
+                        goto skip_print;
+        }
+
+        ret = print_ht(&rhlt);
+        WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
+
+skip_print:
+        rhltable_destroy(&rhlt);
+
+        return 0;
+}
+
+static int __init test_insert_duplicates_run(void)
+{
+        struct test_obj_rhl rhl_test_objects[3] = {};
+
+        pr_info("test inserting duplicates\n");
+
+        /* two different values that map to same bucket */
+        rhl_test_objects[0].value.id = 1;
+        rhl_test_objects[1].value.id = 21;
+
+        /* and another duplicate with same as [0] value
+         * which will be second on the bucket list */
+        rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
+
+        test_insert_dup(rhl_test_objects, 2, false);
+        test_insert_dup(rhl_test_objects, 3, false);
+        test_insert_dup(rhl_test_objects, 2, true);
+        test_insert_dup(rhl_test_objects, 3, true);
+
+        return 0;
+}
+
 static int thread_lookup_test(struct thread_data *tdata)
 {
         unsigned int entries = tdata->entries;
@@ -613,6 +745,8 @@ static int __init test_rht_init(void)
         do_div(total_time, runs);
         pr_info("Average test time: %llu\n", total_time);
 
+        test_insert_duplicates_run();
+
         if (!tcount)
                 return 0;
 
diff --git a/mm/gup.c b/mm/gup.c
index 1b46e6e74881..6afae32571ca 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -516,7 +516,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
         }
 
         if (ret & VM_FAULT_RETRY) {
-                if (nonblocking)
+                if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                         *nonblocking = 0;
                 return -EBUSY;
         }
@@ -890,7 +890,10 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
                         break;
                 }
                 if (*locked) {
-                        /* VM_FAULT_RETRY didn't trigger */
+                        /*
+                         * VM_FAULT_RETRY didn't trigger or it was a
+                         * FOLL_NOWAIT.
+                         */
                         if (!pages_done)
                                 pages_done = ret;
                         break;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 87ab9b8f56b5..5a68730eebd6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -555,7 +555,8 @@ static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 
         VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-        if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) {
+        if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,
+                                  true)) {
                 put_page(page);
                 count_vm_event(THP_FAULT_FALLBACK);
                 return VM_FAULT_FALLBACK;
@@ -1316,7 +1317,7 @@ alloc:
         }
 
         if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
-                                        huge_gfp, &memcg, true))) {
+                                huge_gfp | __GFP_NORETRY, &memcg, true))) {
                 put_page(new_page);
                 split_huge_pmd(vma, vmf->pmd, vmf->address);
                 if (page)
@@ -2783,11 +2784,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 
         list_for_each_safe(pos, next, &list) {
                 page = list_entry((void *)pos, struct page, mapping);
-                lock_page(page);
+                if (!trylock_page(page))
+                        goto next;
                 /* split_huge_page() removes page from list on success */
                 if (!split_huge_page(page))
                         split++;
                 unlock_page(page);
+next:
                 put_page(page);
         }
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c204e3d132b..976bbc5646fe 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -18,6 +18,7 @@
 #include <linux/bootmem.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/mmdebug.h>
 #include <linux/sched/signal.h>
 #include <linux/rmap.h>
 #include <linux/string_helpers.h>
@@ -1583,7 +1584,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
                 page = NULL;
         } else {
                 h->surplus_huge_pages++;
-                h->nr_huge_pages_node[page_to_nid(page)]++;
+                h->surplus_huge_pages_node[page_to_nid(page)]++;
         }
 
 out_unlock:
@@ -4374,6 +4375,12 @@ int hugetlb_reserve_pages(struct inode *inode,
         struct resv_map *resv_map;
         long gbl_reserve;
 
+        /* This should never happen */
+        if (from > to) {
+                VM_WARN(1, "%s called with a negative range\n", __func__);
+                return -EINVAL;
+        }
+
         /*
          * Only apply hugepage reservation if asked. At fault time, an
          * attempt will be made for VM_NORESERVE to allocate a page
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b7e2268dfc9a..e42568284e06 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -530,7 +530,12 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                 goto out;
         }
 
-        VM_BUG_ON_PAGE(PageCompound(page), page);
+        /* TODO: teach khugepaged to collapse THP mapped with pte */
+        if (PageCompound(page)) {
+                result = SCAN_PAGE_COMPOUND;
+                goto out;
+        }
+
         VM_BUG_ON_PAGE(!PageAnon(page), page);
 
         /*
@@ -960,7 +965,9 @@ static void collapse_huge_page(struct mm_struct *mm,
                 goto out_nolock;
         }
 
-        if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
+        /* Do not oom kill for khugepaged charges */
+        if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
+                                           &memcg, true))) {
                 result = SCAN_CGROUP_CHARGE_FAIL;
                 goto out_nolock;
         }
@@ -1319,7 +1326,9 @@ static void collapse_shmem(struct mm_struct *mm,
                 goto out;
         }
 
-        if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
+        /* Do not oom kill for khugepaged charges */
+        if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
+                                           &memcg, true))) {
                 result = SCAN_CGROUP_CHARGE_FAIL;
                 goto out;
         }
diff --git a/mm/memblock.c b/mm/memblock.c
index 5a9ca2a1751b..48376bd33274 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1101,34 +1101,6 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                 *out_nid = r->nid;
 }
 
-unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
-                                                      unsigned long max_pfn)
-{
-        struct memblock_type *type = &memblock.memory;
-        unsigned int right = type->cnt;
-        unsigned int mid, left = 0;
-        phys_addr_t addr = PFN_PHYS(pfn + 1);
-
-        do {
-                mid = (right + left) / 2;
-
-                if (addr < type->regions[mid].base)
-                        right = mid;
-                else if (addr >= (type->regions[mid].base +
-                                  type->regions[mid].size))
-                        left = mid + 1;
-                else {
-                        /* addr is within the region, so pfn + 1 is valid */
-                        return min(pfn + 1, max_pfn);
-                }
-        } while (left < right);
-
-        if (right == type->cnt)
-                return max_pfn;
-        else
-                return min(PHYS_PFN(type->regions[right].base), max_pfn);
-}
-
 /**
  * memblock_set_node - set node ID on memblock regions
  * @base: base of area to set node ID for
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d879f1d8a44a..32cba0332787 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2124,6 +2124,9 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
         case MPOL_INTERLEAVE:
                 return !!nodes_equal(a->v.nodes, b->v.nodes);
         case MPOL_PREFERRED:
+                /* a's ->flags is the same as b's */
+                if (a->flags & MPOL_F_LOCAL)
+                        return true;
                 return a->v.preferred_node == b->v.preferred_node;
         default:
                 BUG();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb416723538f..1741dd23e7c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1910,7 +1910,9 @@ static int move_freepages(struct zone *zone,
          * Remove at a later date when no bug reports exist related to
          * grouping pages by mobility
          */
-        VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
+        VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
+                  pfn_valid(page_to_pfn(end_page)) &&
+                  page_zone(start_page) != page_zone(end_page));
 #endif
 
         if (num_movable)
@@ -3594,7 +3596,7 @@ static bool __need_fs_reclaim(gfp_t gfp_mask)
                 return false;
 
         /* this guy won't enter reclaim */
-        if ((current->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+        if (current->flags & PF_MEMALLOC)
                 return false;
 
         /* We're only interested __GFP_FS allocations for now */
@@ -5354,17 +5356,8 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 if (context != MEMMAP_EARLY)
                         goto not_early;
 
-                if (!early_pfn_valid(pfn)) {
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-                        /*
-                         * Skip to the pfn preceding the next valid one (or
-                         * end_pfn), such that we hit a valid pfn (or end_pfn)
-                         * on our next iteration of the loop.
-                         */
-                        pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
-#endif
+                if (!early_pfn_valid(pfn))
                         continue;
-                }
                 if (!early_pfn_in_nid(pfn, nid))
                         continue;
                 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index d2a76642c4ae..38de70ab1a0d 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -34,7 +34,7 @@
 #include <linux/log2.h>
 
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
-                               int page_start, int page_end)
+                               int page_start, int page_end, gfp_t gfp)
 {
         return 0;
 }
@@ -45,18 +45,18 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
         /* nada */
 }
 
-static struct pcpu_chunk *pcpu_create_chunk(void)
+static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 {
         const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
         struct pcpu_chunk *chunk;
         struct page *pages;
         int i;
 
-        chunk = pcpu_alloc_chunk();
+        chunk = pcpu_alloc_chunk(gfp);
         if (!chunk)
                 return NULL;
 
-        pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
+        pages = alloc_pages(gfp, order_base_2(nr_pages));
         if (!pages) {
                 pcpu_free_chunk(chunk);
                 return NULL;
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 9158e5a81391..d8078de912de 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -37,7 +37,7 @@ static struct page **pcpu_get_pages(void)
         lockdep_assert_held(&pcpu_alloc_mutex);
 
         if (!pages)
-                pages = pcpu_mem_zalloc(pages_size);
+                pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
         return pages;
 }
 
@@ -73,18 +73,21 @@ static void pcpu_free_pages(struct pcpu_chunk *chunk,
  * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
  * @page_start: page index of the first page to be allocated
  * @page_end: page index of the last page to be allocated + 1
+ * @gfp: allocation flags passed to the underlying allocator
  *
  * Allocate pages [@page_start,@page_end) into @pages for all units.
  * The allocation is for @chunk. Percpu core doesn't care about the
  * content of @pages and will pass it verbatim to pcpu_map_pages().
  */
 static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
-                            struct page **pages, int page_start, int page_end)
+                            struct page **pages, int page_start, int page_end,
+                            gfp_t gfp)
 {
-        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM;
         unsigned int cpu, tcpu;
         int i;
 
+        gfp |= __GFP_HIGHMEM;
+
         for_each_possible_cpu(cpu) {
                 for (i = page_start; i < page_end; i++) {
                         struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
@@ -262,6 +265,7 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * @chunk: chunk of interest
  * @page_start: the start page
  * @page_end: the end page
+ * @gfp: allocation flags passed to the underlying memory allocator
  *
  * For each cpu, populate and map pages [@page_start,@page_end) into
  * @chunk.
@@ -270,7 +274,7 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
  */
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
-                               int page_start, int page_end)
+                               int page_start, int page_end, gfp_t gfp)
 {
         struct page **pages;
 
@@ -278,7 +282,7 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
         if (!pages)
                 return -ENOMEM;
 
-        if (pcpu_alloc_pages(chunk, pages, page_start, page_end))
+        if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
                 return -ENOMEM;
 
         if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
@@ -325,12 +329,12 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
         pcpu_free_pages(chunk, pages, page_start, page_end);
 }
 
-static struct pcpu_chunk *pcpu_create_chunk(void)
+static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
 {
         struct pcpu_chunk *chunk;
         struct vm_struct **vms;
 
-        chunk = pcpu_alloc_chunk();
+        chunk = pcpu_alloc_chunk(gfp);
         if (!chunk)
                 return NULL;
 
diff --git a/mm/percpu.c b/mm/percpu.c
index 50e7fdf84055..9297098519a6 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -80,6 +80,7 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 #include <linux/kmemleak.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -447,26 +448,25 @@ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
 /**
  * pcpu_mem_zalloc - allocate memory
  * @size: bytes to allocate
+ * @gfp: allocation flags
  *
  * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
- * kzalloc() is used; otherwise, vzalloc() is used. The returned
- * memory is always zeroed.
- *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
+ * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
+ * This is to facilitate passing through whitelisted flags. The
+ * returned memory is always zeroed.
  *
  * RETURNS:
  * Pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_mem_zalloc(size_t size)
+static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
 {
         if (WARN_ON_ONCE(!slab_is_available()))
                 return NULL;
 
         if (size <= PAGE_SIZE)
-                return kzalloc(size, GFP_KERNEL);
+                return kzalloc(size, gfp);
         else
-                return vzalloc(size);
+                return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
 }
 
 /**
@@ -1154,12 +1154,12 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
         return chunk;
 }
 
-static struct pcpu_chunk *pcpu_alloc_chunk(void)
+static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
 {
         struct pcpu_chunk *chunk;
         int region_bits;
 
-        chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
+        chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
         if (!chunk)
                 return NULL;
 
@@ -1168,17 +1168,17 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
         region_bits = pcpu_chunk_map_bits(chunk);
 
         chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
-                                           sizeof(chunk->alloc_map[0]));
+                                           sizeof(chunk->alloc_map[0]), gfp);
         if (!chunk->alloc_map)
                 goto alloc_map_fail;
 
         chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
-                                           sizeof(chunk->bound_map[0]));
+                                           sizeof(chunk->bound_map[0]), gfp);
         if (!chunk->bound_map)
                 goto bound_map_fail;
 
         chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
-                                           sizeof(chunk->md_blocks[0]));
+                                           sizeof(chunk->md_blocks[0]), gfp);
         if (!chunk->md_blocks)
                 goto md_blocks_fail;
 
@@ -1277,9 +1277,11 @@ static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
  * pcpu_addr_to_page - translate address to physical address
  * pcpu_verify_alloc_info - check alloc_info is acceptable during init
  */
-static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
-static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
-static struct pcpu_chunk *pcpu_create_chunk(void);
+static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
+                               int page_start, int page_end, gfp_t gfp);
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
+                                  int page_start, int page_end);
+static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
 static struct page *pcpu_addr_to_page(void *addr);
 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
@@ -1339,6 +1341,8 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                                  gfp_t gfp)
 {
+        /* whitelisted flags that can be passed to the backing allocators */
+        gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
         bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
         bool do_warn = !(gfp & __GFP_NOWARN);
         static int warn_limit = 10;
@@ -1369,8 +1373,17 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                 return NULL;
         }
 
-        if (!is_atomic)
-                mutex_lock(&pcpu_alloc_mutex);
+        if (!is_atomic) {
+                /*
+                 * pcpu_balance_workfn() allocates memory under this mutex,
+                 * and it may wait for memory reclaim. Allow current task
+                 * to become OOM victim, in case of memory pressure.
+                 */
+                if (gfp & __GFP_NOFAIL)
+                        mutex_lock(&pcpu_alloc_mutex);
+                else if (mutex_lock_killable(&pcpu_alloc_mutex))
+                        return NULL;
+        }
 
         spin_lock_irqsave(&pcpu_lock, flags);
 
@@ -1421,7 +1434,7 @@ restart:
         }
 
         if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
-                chunk = pcpu_create_chunk();
+                chunk = pcpu_create_chunk(pcpu_gfp);
                 if (!chunk) {
                         err = "failed to allocate new chunk";
                         goto fail;
@@ -1450,7 +1463,7 @@ area_found:
                              page_start, page_end) {
                 WARN_ON(chunk->immutable);
 
-                ret = pcpu_populate_chunk(chunk, rs, re);
+                ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
 
                 spin_lock_irqsave(&pcpu_lock, flags);
                 if (ret) {
@@ -1561,10 +1574,17 @@ void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
  * pcpu_balance_workfn - manage the amount of free chunks and populated pages
  * @work: unused
  *
- * Reclaim all fully free chunks except for the first one.
+ * Reclaim all fully free chunks except for the first one. This is also
+ * responsible for maintaining the pool of empty populated pages. However,
+ * it is possible that this is called when physical memory is scarce causing
+ * OOM killer to be triggered. We should avoid doing so until an actual
+ * allocation causes the failure as it is possible that requests can be
+ * serviced from already backed regions.
  */
 static void pcpu_balance_workfn(struct work_struct *work)
 {
+        /* gfp flags passed to underlying allocators */
+        const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
         LIST_HEAD(to_free);
         struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
         struct pcpu_chunk *chunk, *next;
@@ -1600,6 +1620,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
                         spin_unlock_irq(&pcpu_lock);
                 }
                 pcpu_destroy_chunk(chunk);
+                cond_resched();
         }
 
         /*
@@ -1645,7 +1666,7 @@ retry_pop:
                                            chunk->nr_pages) {
                         int nr = min(re - rs, nr_to_pop);
 
-                        ret = pcpu_populate_chunk(chunk, rs, rs + nr);
+                        ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
                         if (!ret) {
                                 nr_to_pop -= nr;
                                 spin_lock_irq(&pcpu_lock);
@@ -1662,7 +1683,7 @@ retry_pop:
 
         if (nr_to_pop) {
                 /* ran out of chunks to populate, create a new one and retry */
-                chunk = pcpu_create_chunk();
+                chunk = pcpu_create_chunk(gfp);
                 if (chunk) {
                         spin_lock_irq(&pcpu_lock);
                         pcpu_chunk_relocate(chunk, -1);
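
The thread running through the percpu hunks: caller-supplied gfp flags are
masked against a whitelist (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN) before
they reach the backing allocators, so flags the backing layer cannot honour
never leak through. The masking pattern itself, with hypothetical flag values:

    #include <stdio.h>

    #define F_KERNEL  0x1u
    #define F_NORETRY 0x2u
    #define F_NOWARN  0x4u
    #define F_SPECIAL 0x8u  /* must not reach the backing allocator */

    static unsigned int whitelist_flags(unsigned int flags)
    {
            return flags & (F_KERNEL | F_NORETRY | F_NOWARN);
    }

    int main(void)
    {
            unsigned int caller = F_KERNEL | F_SPECIAL | F_NOWARN;

            printf("passed through: %#x\n", whitelist_flags(caller)); /* 0x5 */
            return 0;
    }
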
diff --git a/mm/shmem.c b/mm/shmem.c
index 1907688b75ee..b85919243399 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -493,36 +493,45 @@ next:
                 info = list_entry(pos, struct shmem_inode_info, shrinklist);
                 inode = &info->vfs_inode;
 
-                if (nr_to_split && split >= nr_to_split) {
-                        iput(inode);
-                        continue;
-                }
+                if (nr_to_split && split >= nr_to_split)
+                        goto leave;
 
-                page = find_lock_page(inode->i_mapping,
+                page = find_get_page(inode->i_mapping,
                                 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
                 if (!page)
                         goto drop;
 
+                /* No huge page at the end of the file: nothing to split */
                 if (!PageTransHuge(page)) {
-                        unlock_page(page);
                         put_page(page);
                         goto drop;
                 }
 
+                /*
+                 * Leave the inode on the list if we failed to lock
+                 * the page at this time.
+                 *
+                 * Waiting for the lock may lead to deadlock in the
+                 * reclaim path.
+                 */
+                if (!trylock_page(page)) {
+                        put_page(page);
+                        goto leave;
+                }
+
                 ret = split_huge_page(page);
                 unlock_page(page);
                 put_page(page);
 
-                if (ret) {
-                        /* split failed: leave it on the list */
-                        iput(inode);
-                        continue;
-                }
+                /* If split failed leave the inode on the list */
+                if (ret)
+                        goto leave;
 
                 split++;
 drop:
                 list_del_init(&info->shrinklist);
                 removed++;
+leave:
                 iput(inode);
         }
 
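
Like the deferred_split_scan() hunk in mm/huge_memory.c above, the shmem
shrinker now takes the page lock with trylock_page() and defers the work when
the lock is contended, because sleeping on a page lock from the reclaim path
can deadlock against the lock holder. A user-space analogue of the
try-lock-or-defer shape:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

    static int try_split(void)
    {
            if (pthread_mutex_trylock(&page_lock) != 0)
                    return -1;      /* leave it on the list, retry later */
            /* ... do the work under the lock ... */
            pthread_mutex_unlock(&page_lock);
            return 0;
    }

    int main(void)
    {
            pthread_mutex_lock(&page_lock);         /* simulate a contended holder */
            printf("try_split: %d\n", try_split()); /* -1: deferred, no blocking */
            pthread_mutex_unlock(&page_lock);
            return 0;
    }
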
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bee53495a829..cd5dc3faaa57 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1780,6 +1780,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
                 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
         /*
+         * If dirty pages are scanned that are not queued for IO, it
+         * implies that flushers are not doing their job. This can
+         * happen when memory pressure pushes dirty pages to the end of
+         * the LRU before the dirty limits are breached and the dirty
+         * data has expired. It can also happen when the proportion of
+         * dirty pages grows not through writes but through memory
+         * pressure reclaiming all the clean cache. And in some cases,
+         * the flushers simply cannot keep up with the allocation
+         * rate. Nudge the flusher threads in case they are asleep.
+         */
+        if (stat.nr_unqueued_dirty == nr_taken)
+                wakeup_flusher_threads(WB_REASON_VMSCAN);
+
+        /*
          * Legacy memcg will stall in page writeback so avoid forcibly
          * stalling here.
          */
@@ -1791,22 +1805,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
         if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
                 set_bit(PGDAT_CONGESTED, &pgdat->flags);
 
-        /*
-         * If dirty pages are scanned that are not queued for IO, it
-         * implies that flushers are not doing their job. This can
-         * happen when memory pressure pushes dirty pages to the end of
-         * the LRU before the dirty limits are breached and the dirty
-         * data has expired. It can also happen when the proportion of
-         * dirty pages grows not through writes but through memory
-         * pressure reclaiming all the clean cache. And in some cases,
-         * the flushers simply cannot keep up with the allocation
-         * rate. Nudge the flusher threads in case they are asleep, but
-         * also allow kswapd to start writing pages during reclaim.
-         */
-        if (stat.nr_unqueued_dirty == nr_taken) {
-                wakeup_flusher_threads(WB_REASON_VMSCAN);
+        /* Allow kswapd to start writing pages during reclaim. */
+        if (stat.nr_unqueued_dirty == nr_taken)
                 set_bit(PGDAT_DIRTY, &pgdat->flags);
-        }
 
         /*
          * If kswapd scans pages marked marked for immediate
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 64aa9f755e1d..45c9bf5ff3a0 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -48,8 +48,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
          * original position later
          */
         skb_push(skb, offset);
-        skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
-                                      skb->vlan_tci);
+        skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
+                                            skb->vlan_tci, skb->mac_len);
         if (!skb)
                 return false;
         skb_pull(skb, offset + VLAN_HLEN);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 75dda9454ccf..a60bacf7120b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -400,7 +400,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
                    batadv_arp_hw_src(skb, hdr_size), &ip_src,
                    batadv_arp_hw_dst(skb, hdr_size), &ip_dst);
 
-        if (hdr_size == 0)
+        if (hdr_size < sizeof(struct batadv_unicast_packet))
                 return;
 
         unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 7d5e9abb7a65..55c358ad3331 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -24,6 +24,7 @@
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
+#include <linux/eventpoll.h>
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 52d8a4b848c0..853773e45f79 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -22,6 +22,7 @@
 #include <linux/compiler.h>
 #include <linux/debugfs.h>
 #include <linux/errno.h>
+#include <linux/eventpoll.h>
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 5615b6abea6f..de3a055f7dd8 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -598,8 +598,8 @@ update:
                 bat_priv->mcast.enabled = true;
         }
 
-        return !(mcast_data.flags &
-                 (BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6));
+        return !(mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV4 &&
+                 mcast_data.flags & BATADV_MCAST_WANT_ALL_IPV6);
 }
 
 /**
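
The multicast change is a pure boolean fix: !(flags & (A | B)) is false when
either flag is set, while the intended condition is false only when both are.
The difference in plain C:

    #include <stdio.h>

    #define WANT_ALL_IPV4 0x1u
    #define WANT_ALL_IPV6 0x2u

    int main(void)
    {
            unsigned int f = WANT_ALL_IPV4; /* only one of the two flags set */

            /* old form: prints 0 (either bit suffices to fail the test) */
            printf("%d\n", !(f & (WANT_ALL_IPV4 | WANT_ALL_IPV6)));
            /* fixed form: prints 1 (both bits are required to fail it) */
            printf("%d\n", !(f & WANT_ALL_IPV4 && f & WANT_ALL_IPV6));
            return 0;
    }
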
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 289df027ecdd..cc3ed93a6d51 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -759,6 +759,7 @@ free_skb:
 /**
  * batadv_reroute_unicast_packet() - update the unicast header for re-routing
  * @bat_priv: the bat priv with all the soft interface information
+ * @skb: unicast packet to process
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
  * @vid: VLAN identifier
@@ -770,7 +771,7 @@ free_skb:
  * Return: true if the packet header has been updated, false otherwise
  */
 static bool
-batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
+batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
                               struct batadv_unicast_packet *unicast_packet,
                               u8 *dst_addr, unsigned short vid)
 {
@@ -799,8 +800,10 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
         }
 
         /* update the packet header */
+        skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
         ether_addr_copy(unicast_packet->dest, orig_addr);
         unicast_packet->ttvn = orig_ttvn;
+        skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 
         ret = true;
 out:
@@ -841,7 +844,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
          * the packet to
          */
         if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
-                if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+                if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
                                                   ethhdr->h_dest, vid))
                         batadv_dbg_ratelimited(BATADV_DBG_TT,
                                                bat_priv,
@@ -887,7 +890,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
          * destination can possibly be updated and forwarded towards the new
          * target host
          */
-        if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
+        if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
                                           ethhdr->h_dest, vid)) {
                 batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
                                        "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
@@ -910,12 +913,14 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         if (!primary_if)
                 return false;
 
+        /* update the packet header */
+        skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
         ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
+        unicast_packet->ttvn = curr_ttvn;
+        skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
 
         batadv_hardif_put(primary_if);
 
-        unicast_packet->ttvn = curr_ttvn;
-
         return true;
 }
 
@@ -968,14 +973,10 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
         struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
         int check, hdr_size = sizeof(*unicast_packet);
         enum batadv_subtype subtype;
-        struct ethhdr *ethhdr;
         int ret = NET_RX_DROP;
         bool is4addr, is_gw;
 
         unicast_packet = (struct batadv_unicast_packet *)skb->data;
-        unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
-        ethhdr = eth_hdr(skb);
-
         is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
         /* the caller function should have already pulled 2 bytes */
         if (is4addr)
@@ -995,12 +996,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
         if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
                 goto free_skb;
 
+        unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
         /* packet for me */
         if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
                 /* If this is a unicast packet from another backgone gw,
                  * drop it.
                  */
-                orig_addr_gw = ethhdr->h_source;
+                orig_addr_gw = eth_hdr(skb)->h_source;
                 orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
                 if (orig_node_gw) {
                         is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
@@ -1015,6 +1018,8 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
         }
 
         if (is4addr) {
+                unicast_4addr_packet =
+                        (struct batadv_unicast_4addr_packet *)skb->data;
                 subtype = unicast_4addr_packet->subtype;
                 batadv_dat_inc_counter(bat_priv, subtype);
 
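
The batman-adv hunks re-derive unicast_packet and eth_hdr(skb) from skb->data
after calls that may modify or reallocate the buffer, and bracket the header
edits with skb_postpull_rcsum()/skb_postpush_rcsum() so the skb checksum
stays consistent. The stale-pointer half of the bug has a straightforward
user-space analogue:

    #include <stdlib.h>
    #include <string.h>

    struct hdr { unsigned char dest[6]; };

    static char *maybe_grow(char *buf, size_t *len)
    {
            *len += 64;                /* e.g. expanding headroom */
            return realloc(buf, *len); /* old pointers into buf may now dangle */
    }

    int main(void)
    {
            size_t len = 64;
            char *buf = calloc(1, len), *grown;

            if (!buf)
                    return 1;
            grown = maybe_grow(buf, &len);
            if (!grown) {
                    free(buf);
                    return 1;
            }
            buf = grown;
            /* re-derive the header pointer from the possibly-moved buffer */
            struct hdr *h = (struct hdr *)buf;

            memset(h->dest, 0xff, sizeof(h->dest));
            free(buf);
            return 0;
    }
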
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 01117ae84f1d..a2ddae2f37d7 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2296,8 +2296,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
         else
                 sec_level = authreq_to_seclevel(auth);
 
-        if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
+        if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) {
+                /* If link is already encrypted with sufficient security we
+                 * still need refresh encryption as per Core Spec 5.0 Vol 3,
+                 * Part H 2.4.6
+                 */
+                smp_ltk_encrypt(conn, hcon->sec_level);
                 return 0;
+        }
 
         if (sec_level > hcon->pending_sec_level)
                 hcon->pending_sec_level = sec_level;
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index c5afb4232ecb..620e54f08296 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -177,6 +177,28 @@ static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
         return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
 }
 
+static bool wormhash_offset_invalid(int off, unsigned int len)
+{
+        if (off == 0) /* not present */
+                return false;
+
+        if (off < (int)sizeof(struct ebt_among_info) ||
+            off % __alignof__(struct ebt_mac_wormhash))
+                return true;
+
+        off += sizeof(struct ebt_mac_wormhash);
+
+        return off > len;
+}
+
+static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
+{
+        if (a == 0)
+                a = sizeof(struct ebt_among_info);
+
+        return ebt_mac_wormhash_size(wh) + a == b;
+}
+
 static int ebt_among_mt_check(const struct xt_mtchk_param *par)
 {
         const struct ebt_among_info *info = par->matchinfo;
@@ -189,6 +211,10 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
         if (expected_length > em->match_size)
                 return -EINVAL;
 
+        if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
+            wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
+                return -EINVAL;
+
         wh_dst = ebt_among_wh_dst(info);
         if (poolsize_invalid(wh_dst))
                 return -EINVAL;
@@ -201,6 +227,14 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
         if (poolsize_invalid(wh_src))
                 return -EINVAL;
 
+        if (info->wh_src_ofs < info->wh_dst_ofs) {
+                if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
+                        return -EINVAL;
+        } else {
+                if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
+                        return -EINVAL;
+        }
+
         expected_length += ebt_mac_wormhash_size(wh_src);
 
         if (em->match_size != EBT_ALIGN(expected_length)) {
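
The new ebt_among checks treat wh_dst_ofs/wh_src_ofs as untrusted input: an
offset must lie past the fixed header, be suitably aligned, and leave room
for the object it points at. The same validation shape in standalone C (the
types here are hypothetical):

    #include <stdio.h>
    #include <stddef.h>

    struct payload { unsigned int poolsize; };

    static int offset_invalid(size_t off, size_t hdr_size, size_t total_len)
    {
            if (off == 0)           /* not present */
                    return 0;
            if (off < hdr_size || off % _Alignof(struct payload))
                    return 1;
            return off + sizeof(struct payload) > total_len;
    }

    int main(void)
    {
            printf("%d\n", offset_invalid(16, 8, 64));      /* 0: in bounds */
            printf("%d\n", offset_invalid(62, 8, 64));      /* 1: rejected */
            return 0;
    }

Checking "offset + size > total" after the lower-bound and alignment checks
is what keeps a crafted offset from pointing the parser at out-of-bounds
memory.
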
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 254ef9f49567..a94d23b0a9af 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2119,8 +2119,12 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
          * offsets are relative to beginning of struct ebt_entry (i.e., 0).
          */
         for (i = 0; i < 4 ; ++i) {
-                if (offsets[i] >= *total)
+                if (offsets[i] > *total)
                         return -EINVAL;
+
+                if (i < 3 && offsets[i] == *total)
+                        return -EINVAL;
+
                 if (i == 0)
                         continue;
                 if (offsets[i-1] > offsets[i])
diff --git a/net/core/dev.c b/net/core/dev.c
index d8887cc38e7b..f9c28f44286c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3278,15 +3278,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 static void skb_update_prio(struct sk_buff *skb)
 {
-        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+        const struct netprio_map *map;
+        const struct sock *sk;
+        unsigned int prioidx;
 
-        if (!skb->priority && skb->sk && map) {
-                unsigned int prioidx =
-                        sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+        if (skb->priority)
+                return;
+        map = rcu_dereference_bh(skb->dev->priomap);
+        if (!map)
+                return;
+        sk = skb_to_full_sk(skb);
+        if (!sk)
+                return;
 
-                if (prioidx < map->priomap_len)
-                        skb->priority = map->priomap[prioidx];
-        }
+        prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
+
+        if (prioidx < map->priomap_len)
+                skb->priority = map->priomap[prioidx];
 }
 #else
 #define skb_update_prio(skb)
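
Besides switching to skb_to_full_sk(), so the cgroup priority index is read
from a full socket rather than whatever skb->sk happens to be, the rewrite
flattens one nested conditional into guard clauses. The same shape in
miniature:

    #include <stdio.h>

    struct map { int len; int prio[4]; };

    static int lookup_prio(const struct map *map, int idx, int cur)
    {
            if (cur)                /* already prioritized */
                    return cur;
            if (!map)               /* no map attached */
                    return 0;
            if (idx >= map->len)    /* out of range */
                    return 0;
            return map->prio[idx];
    }

    int main(void)
    {
            struct map m = { .len = 2, .prio = { 7, 9 } };

            printf("%d\n", lookup_prio(&m, 1, 0));  /* 9 */
            return 0;
    }
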
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 0ab1af04296c..a04e1e88bf3a 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -402,8 +402,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
402 if (colon) 402 if (colon)
403 *colon = 0; 403 *colon = 0;
404 404
405 dev_load(net, ifr->ifr_name);
406
407 /* 405 /*
408 * See which interface the caller is talking about. 406 * See which interface the caller is talking about.
409 */ 407 */
@@ -423,6 +421,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
423 case SIOCGIFMAP: 421 case SIOCGIFMAP:
424 case SIOCGIFINDEX: 422 case SIOCGIFINDEX:
425 case SIOCGIFTXQLEN: 423 case SIOCGIFTXQLEN:
424 dev_load(net, ifr->ifr_name);
426 rcu_read_lock(); 425 rcu_read_lock();
427 ret = dev_ifsioc_locked(net, ifr, cmd); 426 ret = dev_ifsioc_locked(net, ifr, cmd);
428 rcu_read_unlock(); 427 rcu_read_unlock();
@@ -431,6 +430,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
431 return ret; 430 return ret;
432 431
433 case SIOCETHTOOL: 432 case SIOCETHTOOL:
433 dev_load(net, ifr->ifr_name);
434 rtnl_lock(); 434 rtnl_lock();
435 ret = dev_ethtool(net, ifr); 435 ret = dev_ethtool(net, ifr);
436 rtnl_unlock(); 436 rtnl_unlock();
@@ -447,6 +447,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
447 case SIOCGMIIPHY: 447 case SIOCGMIIPHY:
448 case SIOCGMIIREG: 448 case SIOCGMIIREG:
449 case SIOCSIFNAME: 449 case SIOCSIFNAME:
450 dev_load(net, ifr->ifr_name);
450 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 451 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
451 return -EPERM; 452 return -EPERM;
452 rtnl_lock(); 453 rtnl_lock();
@@ -494,6 +495,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
494 /* fall through */ 495 /* fall through */
495 case SIOCBONDSLAVEINFOQUERY: 496 case SIOCBONDSLAVEINFOQUERY:
496 case SIOCBONDINFOQUERY: 497 case SIOCBONDINFOQUERY:
498 dev_load(net, ifr->ifr_name);
497 rtnl_lock(); 499 rtnl_lock();
498 ret = dev_ifsioc(net, ifr, cmd); 500 ret = dev_ifsioc(net, ifr, cmd);
499 rtnl_unlock(); 501 rtnl_unlock();
@@ -518,6 +520,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
518 cmd == SIOCGHWTSTAMP || 520 cmd == SIOCGHWTSTAMP ||
519 (cmd >= SIOCDEVPRIVATE && 521 (cmd >= SIOCDEVPRIVATE &&
520 cmd <= SIOCDEVPRIVATE + 15)) { 522 cmd <= SIOCDEVPRIVATE + 15)) {
523 dev_load(net, ifr->ifr_name);
521 rtnl_lock(); 524 rtnl_lock();
522 ret = dev_ifsioc(net, ifr, cmd); 525 ret = dev_ifsioc(net, ifr, cmd);
523 rtnl_unlock(); 526 rtnl_unlock();
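The dev_ioctl() hunks above hoist the unconditional dev_load() out of the common path and repeat it inside the specific ioctl cases. The apparent intent, reading only the diff, is that commands which never look up a device by name should not trigger netdev module autoloading. The shape, with hypothetical command names:

    #include <stdio.h>

    enum cmd { CMD_GET_STATS, CMD_SET_NAME, CMD_PING };

    static void load_backend(const char *name)
    {
        /* stand-in for dev_load(): may auto-load a kernel module */
        printf("loading backend for %s\n", name);
    }

    /* Mirrors the restructuring above: the side effect moves from
     * before the dispatch into the cases that actually need it. */
    static int dispatch(enum cmd cmd, const char *name)
    {
        switch (cmd) {
        case CMD_GET_STATS:
        case CMD_SET_NAME:
            load_backend(name); /* only these commands resolve a device */
            return 0;
        case CMD_PING:
            return 0;           /* no device lookup, so no load */
        }
        return -1;
    }

    int main(void)
    {
        dispatch(CMD_PING, "eth0");
        dispatch(CMD_GET_STATS, "eth0");
        return 0;
    }
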
diff --git a/net/core/devlink.c b/net/core/devlink.c
index d03b96f87c25..9236e421bd62 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1798,7 +1798,7 @@ send_done:
1798 if (!nlh) { 1798 if (!nlh) {
1799 err = devlink_dpipe_send_and_alloc_skb(&skb, info); 1799 err = devlink_dpipe_send_and_alloc_skb(&skb, info);
1800 if (err) 1800 if (err)
1801 goto err_skb_send_alloc; 1801 return err;
1802 goto send_done; 1802 goto send_done;
1803 } 1803 }
1804 1804
@@ -1807,7 +1807,6 @@ send_done:
1807nla_put_failure: 1807nla_put_failure:
1808 err = -EMSGSIZE; 1808 err = -EMSGSIZE;
1809err_table_put: 1809err_table_put:
1810err_skb_send_alloc:
1811 genlmsg_cancel(skb, hdr); 1810 genlmsg_cancel(skb, hdr);
1812 nlmsg_free(skb); 1811 nlmsg_free(skb);
1813 return err; 1812 return err;
@@ -2073,7 +2072,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
2073 table->counters_enabled, 2072 table->counters_enabled,
2074 &dump_ctx); 2073 &dump_ctx);
2075 if (err) 2074 if (err)
2076 goto err_entries_dump; 2075 return err;
2077 2076
2078send_done: 2077send_done:
2079 nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq, 2078 nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
@@ -2081,16 +2080,10 @@ send_done:
2081 if (!nlh) { 2080 if (!nlh) {
2082 err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info); 2081 err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
2083 if (err) 2082 if (err)
2084 goto err_skb_send_alloc; 2083 return err;
2085 goto send_done; 2084 goto send_done;
2086 } 2085 }
2087 return genlmsg_reply(dump_ctx.skb, info); 2086 return genlmsg_reply(dump_ctx.skb, info);
2088
2089err_entries_dump:
2090err_skb_send_alloc:
2091 genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
2092 nlmsg_free(dump_ctx.skb);
2093 return err;
2094} 2087}
2095 2088
2096static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb, 2089static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
@@ -2229,7 +2222,7 @@ send_done:
2229 if (!nlh) { 2222 if (!nlh) {
2230 err = devlink_dpipe_send_and_alloc_skb(&skb, info); 2223 err = devlink_dpipe_send_and_alloc_skb(&skb, info);
2231 if (err) 2224 if (err)
2232 goto err_skb_send_alloc; 2225 return err;
2233 goto send_done; 2226 goto send_done;
2234 } 2227 }
2235 return genlmsg_reply(skb, info); 2228 return genlmsg_reply(skb, info);
@@ -2237,7 +2230,6 @@ send_done:
2237nla_put_failure: 2230nla_put_failure:
2238 err = -EMSGSIZE; 2231 err = -EMSGSIZE;
2239err_table_put: 2232err_table_put:
2240err_skb_send_alloc:
2241 genlmsg_cancel(skb, hdr); 2233 genlmsg_cancel(skb, hdr);
2242 nlmsg_free(skb); 2234 nlmsg_free(skb);
2243 return err; 2235 return err;
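The devlink hunks above replace three goto targets with direct returns and delete the err_skb_send_alloc and err_entries_dump labels. The likely reading, though the changelog is not quoted here, is an ownership rule: devlink_dpipe_send_and_alloc_skb() already disposes of the buffer when it fails, so jumping to a cleanup label that cancels and frees it again would be a double free. A sketch of that rule with invented helpers:

    #include <stdlib.h>

    /* If this helper fails it releases the buffer itself, so callers
     * must return directly instead of freeing it again. */
    static int helper_fill(char **buf, int fail)
    {
        *buf = malloc(64);
        if (!*buf)
            return -1;
        if (fail) {
            free(*buf);     /* helper owns the buffer on failure */
            *buf = NULL;
            return -1;
        }
        return 0;
    }

    static int build_message(int fail)
    {
        char *buf;

        if (helper_fill(&buf, fail))
            return -1;      /* direct return, no goto err_free */

        /* ... fill and send buf ... */
        free(buf);
        return 0;
    }

    int main(void)
    {
        return build_message(1) == -1 ? 0 : 1;
    }
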
diff --git a/net/core/filter.c b/net/core/filter.c
index c86f03fd9ea5..00c711c5f1a2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2283,6 +2283,10 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2283 u32 off = skb_mac_header_len(skb); 2283 u32 off = skb_mac_header_len(skb);
2284 int ret; 2284 int ret;
2285 2285
2286 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2287 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2288 return -ENOTSUPP;
2289
2286 ret = skb_cow(skb, len_diff); 2290 ret = skb_cow(skb, len_diff);
2287 if (unlikely(ret < 0)) 2291 if (unlikely(ret < 0))
2288 return ret; 2292 return ret;
@@ -2292,19 +2296,21 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2292 return ret; 2296 return ret;
2293 2297
2294 if (skb_is_gso(skb)) { 2298 if (skb_is_gso(skb)) {
2299 struct skb_shared_info *shinfo = skb_shinfo(skb);
2300
2295 /* SKB_GSO_TCPV4 needs to be changed into 2301 /* SKB_GSO_TCPV4 needs to be changed into
2296 * SKB_GSO_TCPV6. 2302 * SKB_GSO_TCPV6.
2297 */ 2303 */
2298 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 2304 if (shinfo->gso_type & SKB_GSO_TCPV4) {
2299 skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4; 2305 shinfo->gso_type &= ~SKB_GSO_TCPV4;
2300 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; 2306 shinfo->gso_type |= SKB_GSO_TCPV6;
2301 } 2307 }
2302 2308
2303 /* Due to IPv6 header, MSS needs to be downgraded. */ 2309 /* Due to IPv6 header, MSS needs to be downgraded. */
2304 skb_shinfo(skb)->gso_size -= len_diff; 2310 skb_decrease_gso_size(shinfo, len_diff);
2305 /* Header must be checked, and gso_segs recomputed. */ 2311 /* Header must be checked, and gso_segs recomputed. */
2306 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2312 shinfo->gso_type |= SKB_GSO_DODGY;
2307 skb_shinfo(skb)->gso_segs = 0; 2313 shinfo->gso_segs = 0;
2308 } 2314 }
2309 2315
2310 skb->protocol = htons(ETH_P_IPV6); 2316 skb->protocol = htons(ETH_P_IPV6);
@@ -2319,6 +2325,10 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2319 u32 off = skb_mac_header_len(skb); 2325 u32 off = skb_mac_header_len(skb);
2320 int ret; 2326 int ret;
2321 2327
2328 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2329 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2330 return -ENOTSUPP;
2331
2322 ret = skb_unclone(skb, GFP_ATOMIC); 2332 ret = skb_unclone(skb, GFP_ATOMIC);
2323 if (unlikely(ret < 0)) 2333 if (unlikely(ret < 0))
2324 return ret; 2334 return ret;
@@ -2328,19 +2338,21 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2328 return ret; 2338 return ret;
2329 2339
2330 if (skb_is_gso(skb)) { 2340 if (skb_is_gso(skb)) {
2341 struct skb_shared_info *shinfo = skb_shinfo(skb);
2342
2331 /* SKB_GSO_TCPV6 needs to be changed into 2343 /* SKB_GSO_TCPV6 needs to be changed into
2332 * SKB_GSO_TCPV4. 2344 * SKB_GSO_TCPV4.
2333 */ 2345 */
2334 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { 2346 if (shinfo->gso_type & SKB_GSO_TCPV6) {
2335 skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6; 2347 shinfo->gso_type &= ~SKB_GSO_TCPV6;
2336 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 2348 shinfo->gso_type |= SKB_GSO_TCPV4;
2337 } 2349 }
2338 2350
2339 /* Due to IPv4 header, MSS can be upgraded. */ 2351 /* Due to IPv4 header, MSS can be upgraded. */
2340 skb_shinfo(skb)->gso_size += len_diff; 2352 skb_increase_gso_size(shinfo, len_diff);
2341 /* Header must be checked, and gso_segs recomputed. */ 2353 /* Header must be checked, and gso_segs recomputed. */
2342 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2354 shinfo->gso_type |= SKB_GSO_DODGY;
2343 skb_shinfo(skb)->gso_segs = 0; 2355 shinfo->gso_segs = 0;
2344 } 2356 }
2345 2357
2346 skb->protocol = htons(ETH_P_IP); 2358 skb->protocol = htons(ETH_P_IP);
@@ -2439,6 +2451,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2439 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2451 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2440 int ret; 2452 int ret;
2441 2453
2454 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2455 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2456 return -ENOTSUPP;
2457
2442 ret = skb_cow(skb, len_diff); 2458 ret = skb_cow(skb, len_diff);
2443 if (unlikely(ret < 0)) 2459 if (unlikely(ret < 0))
2444 return ret; 2460 return ret;
@@ -2448,11 +2464,13 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
2448 return ret; 2464 return ret;
2449 2465
2450 if (skb_is_gso(skb)) { 2466 if (skb_is_gso(skb)) {
2467 struct skb_shared_info *shinfo = skb_shinfo(skb);
2468
2451 /* Due to header grow, MSS needs to be downgraded. */ 2469 /* Due to header grow, MSS needs to be downgraded. */
2452 skb_shinfo(skb)->gso_size -= len_diff; 2470 skb_decrease_gso_size(shinfo, len_diff);
2453 /* Header must be checked, and gso_segs recomputed. */ 2471 /* Header must be checked, and gso_segs recomputed. */
2454 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2472 shinfo->gso_type |= SKB_GSO_DODGY;
2455 skb_shinfo(skb)->gso_segs = 0; 2473 shinfo->gso_segs = 0;
2456 } 2474 }
2457 2475
2458 return 0; 2476 return 0;
@@ -2463,6 +2481,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2463 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); 2481 u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
2464 int ret; 2482 int ret;
2465 2483
2484 /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
2485 if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
2486 return -ENOTSUPP;
2487
2466 ret = skb_unclone(skb, GFP_ATOMIC); 2488 ret = skb_unclone(skb, GFP_ATOMIC);
2467 if (unlikely(ret < 0)) 2489 if (unlikely(ret < 0))
2468 return ret; 2490 return ret;
@@ -2472,11 +2494,13 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
2472 return ret; 2494 return ret;
2473 2495
2474 if (skb_is_gso(skb)) { 2496 if (skb_is_gso(skb)) {
2497 struct skb_shared_info *shinfo = skb_shinfo(skb);
2498
2475 /* Due to header shrink, MSS can be upgraded. */ 2499 /* Due to header shrink, MSS can be upgraded. */
2476 skb_shinfo(skb)->gso_size += len_diff; 2500 skb_increase_gso_size(shinfo, len_diff);
2477 /* Header must be checked, and gso_segs recomputed. */ 2501 /* Header must be checked, and gso_segs recomputed. */
2478 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 2502 shinfo->gso_type |= SKB_GSO_DODGY;
2479 skb_shinfo(skb)->gso_segs = 0; 2503 shinfo->gso_segs = 0;
2480 } 2504 }
2481 2505
2482 return 0; 2506 return 0;
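All four bpf_skb_* hunks above share one guard: per the in-diff comment, SCTP GSO packets carry the GSO_BY_FRAGS sentinel in gso_size, so gso_size must never be adjusted arithmetically. The direct `+=`/`-=` is also replaced by the skb_increase_gso_size()/skb_decrease_gso_size() helpers. A userspace analogue of the sentinel rule (the 0xFFFF value is an assumption about GSO_BY_FRAGS, not taken from this diff):

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    #define GSO_BY_FRAGS USHRT_MAX  /* assumed sentinel value */

    struct gso_info { unsigned short gso_size; };

    /* Analogue of skb_decrease_gso_size(): the sentinel is not a
     * byte count, so it is rejected before any arithmetic. */
    static int decrease_gso_size(struct gso_info *gi, unsigned short delta)
    {
        if (gi->gso_size == GSO_BY_FRAGS)
            return -1;          /* cannot adjust, as with SCTP above */
        gi->gso_size -= delta;
        return 0;
    }

    int main(void)
    {
        struct gso_info tcp  = { .gso_size = 1448 };
        struct gso_info sctp = { .gso_size = GSO_BY_FRAGS };

        assert(decrease_gso_size(&tcp, 20) == 0 && tcp.gso_size == 1428);
        assert(decrease_gso_size(&sctp, 20) < 0);
        printf("ok\n");
        return 0;
    }
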
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 715c13495ba6..46cb22215ff4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4181,7 +4181,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4181 4181
4182 skb_queue_tail(&sk->sk_error_queue, skb); 4182 skb_queue_tail(&sk->sk_error_queue, skb);
4183 if (!sock_flag(sk, SOCK_DEAD)) 4183 if (!sock_flag(sk, SOCK_DEAD))
4184 sk->sk_data_ready(sk); 4184 sk->sk_error_report(sk);
4185 return 0; 4185 return 0;
4186} 4186}
4187EXPORT_SYMBOL(sock_queue_err_skb); 4187EXPORT_SYMBOL(sock_queue_err_skb);
@@ -4906,7 +4906,7 @@ static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4906 thlen += inner_tcp_hdrlen(skb); 4906 thlen += inner_tcp_hdrlen(skb);
4907 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 4907 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4908 thlen = tcp_hdrlen(skb); 4908 thlen = tcp_hdrlen(skb);
4909 } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) { 4909 } else if (unlikely(skb_is_gso_sctp(skb))) {
4910 thlen = sizeof(struct sctphdr); 4910 thlen = sizeof(struct sctphdr);
4911 } 4911 }
4912 /* UFO sets gso_size to the size of the fragmentation 4912 /* UFO sets gso_size to the size of the fragmentation
@@ -5022,13 +5022,16 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
5022 5022
5023static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 5023static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5024{ 5024{
5025 int mac_len;
5026
5025 if (skb_cow(skb, skb_headroom(skb)) < 0) { 5027 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5026 kfree_skb(skb); 5028 kfree_skb(skb);
5027 return NULL; 5029 return NULL;
5028 } 5030 }
5029 5031
5030 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, 5032 mac_len = skb->data - skb_mac_header(skb);
5031 2 * ETH_ALEN); 5033 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5034 mac_len - VLAN_HLEN - ETH_TLEN);
5032 skb->mac_header += VLAN_HLEN; 5035 skb->mac_header += VLAN_HLEN;
5033 return skb; 5036 return skb;
5034} 5037}
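In skb_reorder_vlan_header() above, the fixed-size memmove over 2 * ETH_ALEN is replaced by one sized from the real distance between skb->data and the MAC header, so a MAC header longer than a plain Ethernet header is apparently moved intact; ETH_TLEN would be the 2-byte EtherType. A toy memmove with the same arithmetic, on a buffer layout invented for the demo:

    #include <stdio.h>
    #include <string.h>

    #define VLAN_HLEN 4
    #define ETH_TLEN  2     /* EtherType field */

    /* Same arithmetic as the fix above: shift whatever MAC header is
     * present forward by VLAN_HLEN, not a hardcoded Ethernet length. */
    static void reorder(unsigned char *mac, int mac_len)
    {
        memmove(mac + VLAN_HLEN, mac, mac_len - VLAN_HLEN - ETH_TLEN);
    }

    int main(void)
    {
        /* 12 address bytes, 4 vlan bytes, 2 type bytes */
        unsigned char hdr[] = "AABBCCDDEEFFvlanTT";

        reorder(hdr, 18);
        printf("%.18s\n", hdr); /* the 12 address bytes now start at +4 */
        return 0;
    }
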
diff --git a/net/core/sock.c b/net/core/sock.c
index f704324d1219..e689496dfd8a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3326,6 +3326,27 @@ void proto_unregister(struct proto *prot)
3326} 3326}
3327EXPORT_SYMBOL(proto_unregister); 3327EXPORT_SYMBOL(proto_unregister);
3328 3328
3329int sock_load_diag_module(int family, int protocol)
3330{
3331 if (!protocol) {
3332 if (!sock_is_registered(family))
3333 return -ENOENT;
3334
3335 return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3336 NETLINK_SOCK_DIAG, family);
3337 }
3338
3339#ifdef CONFIG_INET
3340 if (family == AF_INET &&
3341 !rcu_access_pointer(inet_protos[protocol]))
3342 return -ENOENT;
3343#endif
3344
3345 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3346 NETLINK_SOCK_DIAG, family, protocol);
3347}
3348EXPORT_SYMBOL(sock_load_diag_module);
3349
3329#ifdef CONFIG_PROC_FS 3350#ifdef CONFIG_PROC_FS
3330static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 3351static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
3331 __acquires(proto_list_mutex) 3352 __acquires(proto_list_mutex)
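sock_load_diag_module() above centralizes the module-alias string that every diag caller had been formatting by hand, and adds two guards: with no protocol it refuses families that are not registered at all, and for AF_INET it refuses protocols with no handler in inet_protos, so a diag request cannot pull in modules for things that do not exist. A userspace sketch of the alias format (the constants are standard uapi values, not taken from this diff):

    #include <stdio.h>

    #define PF_NETLINK        16
    #define NETLINK_SOCK_DIAG 4

    /* Builds the alias that request_module() receives in the hunk above. */
    static void diag_alias(char *buf, size_t len, int family, int protocol)
    {
        if (!protocol)
            snprintf(buf, len, "net-pf-%d-proto-%d-type-%d",
                     PF_NETLINK, NETLINK_SOCK_DIAG, family);
        else
            snprintf(buf, len, "net-pf-%d-proto-%d-type-%d-%d",
                     PF_NETLINK, NETLINK_SOCK_DIAG, family, protocol);
    }

    int main(void)
    {
        char buf[64];

        diag_alias(buf, sizeof(buf), 2 /* AF_INET */, 6 /* IPPROTO_TCP */);
        printf("%s\n", buf);    /* net-pf-16-proto-4-type-2-6 */
        return 0;
    }
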
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index aee5642affd9..a3392a8f9276 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -220,8 +220,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
220 return -EINVAL; 220 return -EINVAL;
221 221
222 if (sock_diag_handlers[req->sdiag_family] == NULL) 222 if (sock_diag_handlers[req->sdiag_family] == NULL)
223 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 223 sock_load_diag_module(req->sdiag_family, 0);
224 NETLINK_SOCK_DIAG, req->sdiag_family);
225 224
226 mutex_lock(&sock_diag_table_mutex); 225 mutex_lock(&sock_diag_table_mutex);
227 hndl = sock_diag_handlers[req->sdiag_family]; 226 hndl = sock_diag_handlers[req->sdiag_family];
@@ -247,8 +246,7 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
247 case TCPDIAG_GETSOCK: 246 case TCPDIAG_GETSOCK:
248 case DCCPDIAG_GETSOCK: 247 case DCCPDIAG_GETSOCK:
249 if (inet_rcv_compat == NULL) 248 if (inet_rcv_compat == NULL)
250 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 249 sock_load_diag_module(AF_INET, 0);
251 NETLINK_SOCK_DIAG, AF_INET);
252 250
253 mutex_lock(&sock_diag_table_mutex); 251 mutex_lock(&sock_diag_table_mutex);
254 if (inet_rcv_compat != NULL) 252 if (inet_rcv_compat != NULL)
@@ -281,14 +279,12 @@ static int sock_diag_bind(struct net *net, int group)
281 case SKNLGRP_INET_TCP_DESTROY: 279 case SKNLGRP_INET_TCP_DESTROY:
282 case SKNLGRP_INET_UDP_DESTROY: 280 case SKNLGRP_INET_UDP_DESTROY:
283 if (!sock_diag_handlers[AF_INET]) 281 if (!sock_diag_handlers[AF_INET])
284 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 282 sock_load_diag_module(AF_INET, 0);
285 NETLINK_SOCK_DIAG, AF_INET);
286 break; 283 break;
287 case SKNLGRP_INET6_TCP_DESTROY: 284 case SKNLGRP_INET6_TCP_DESTROY:
288 case SKNLGRP_INET6_UDP_DESTROY: 285 case SKNLGRP_INET6_UDP_DESTROY:
289 if (!sock_diag_handlers[AF_INET6]) 286 if (!sock_diag_handlers[AF_INET6])
290 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 287 sock_load_diag_module(AF_INET6, 0);
291 NETLINK_SOCK_DIAG, AF_INET6);
292 break; 288 break;
293 } 289 }
294 return 0; 290 return 0;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 15bdc002d90c..84cd4e3fd01b 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -794,6 +794,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
794 if (skb == NULL) 794 if (skb == NULL)
795 goto out_release; 795 goto out_release;
796 796
797 if (sk->sk_state == DCCP_CLOSED) {
798 rc = -ENOTCONN;
799 goto out_discard;
800 }
801
797 skb_reserve(skb, sk->sk_prot->max_header); 802 skb_reserve(skb, sk->sk_prot->max_header);
798 rc = memcpy_from_msg(skb_put(skb, len), msg, len); 803 rc = memcpy_from_msg(skb_put(skb, len), msg, len);
799 if (rc != 0) 804 if (rc != 0)
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index cb54b81d0bd9..42a7b85b84e1 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -194,7 +194,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds,
194 ds->ports[i].dn = cd->port_dn[i]; 194 ds->ports[i].dn = cd->port_dn[i];
195 ds->ports[i].cpu_dp = dst->cpu_dp; 195 ds->ports[i].cpu_dp = dst->cpu_dp;
196 196
197 if (dsa_is_user_port(ds, i)) 197 if (!dsa_is_user_port(ds, i))
198 continue; 198 continue;
199 199
200 ret = dsa_slave_create(&ds->ports[i]); 200 ret = dsa_slave_create(&ds->ports[i]);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index e4f305320519..275449b0d633 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -207,9 +207,13 @@ static inline void lowpan_netlink_fini(void)
207static int lowpan_device_event(struct notifier_block *unused, 207static int lowpan_device_event(struct notifier_block *unused,
208 unsigned long event, void *ptr) 208 unsigned long event, void *ptr)
209{ 209{
210 struct net_device *wdev = netdev_notifier_info_to_dev(ptr); 210 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
211 struct wpan_dev *wpan_dev;
211 212
212 if (wdev->type != ARPHRD_IEEE802154) 213 if (ndev->type != ARPHRD_IEEE802154)
214 return NOTIFY_DONE;
215 wpan_dev = ndev->ieee802154_ptr;
216 if (!wpan_dev)
213 return NOTIFY_DONE; 217 return NOTIFY_DONE;
214 218
215 switch (event) { 219 switch (event) {
@@ -218,8 +222,8 @@ static int lowpan_device_event(struct notifier_block *unused,
218 * also delete possible lowpan interfaces which belongs 222 * also delete possible lowpan interfaces which belongs
219 * to the wpan interface. 223 * to the wpan interface.
220 */ 224 */
221 if (wdev->ieee802154_ptr->lowpan_dev) 225 if (wpan_dev->lowpan_dev)
222 lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL); 226 lowpan_dellink(wpan_dev->lowpan_dev, NULL);
223 break; 227 break;
224 default: 228 default:
225 return NOTIFY_DONE; 229 return NOTIFY_DONE;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index a383f299ce24..4e5bc4b2f14e 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -53,8 +53,7 @@ static DEFINE_MUTEX(inet_diag_table_mutex);
53static const struct inet_diag_handler *inet_diag_lock_handler(int proto) 53static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
54{ 54{
55 if (!inet_diag_table[proto]) 55 if (!inet_diag_table[proto])
56 request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK, 56 sock_load_diag_module(AF_INET, proto);
57 NETLINK_SOCK_DIAG, AF_INET, proto);
58 57
59 mutex_lock(&inet_diag_table_mutex); 58 mutex_lock(&inet_diag_table_mutex);
60 if (!inet_diag_table[proto]) 59 if (!inet_diag_table[proto])
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 26a3d0315728..e8ec28999f5c 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -119,6 +119,9 @@ out:
119 119
120static bool inet_fragq_should_evict(const struct inet_frag_queue *q) 120static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
121{ 121{
122 if (!hlist_unhashed(&q->list_evictor))
123 return false;
124
122 return q->net->low_thresh == 0 || 125 return q->net->low_thresh == 0 ||
123 frag_mem_limit(q->net) >= q->net->low_thresh; 126 frag_mem_limit(q->net) >= q->net->low_thresh;
124} 127}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e74ee837b300..4ac5728689f5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -635,6 +635,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
635static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) 635static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
636{ 636{
637 rt->rt_pmtu = fnhe->fnhe_pmtu; 637 rt->rt_pmtu = fnhe->fnhe_pmtu;
638 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
638 rt->dst.expires = fnhe->fnhe_expires; 639 rt->dst.expires = fnhe->fnhe_expires;
639 640
640 if (fnhe->fnhe_gw) { 641 if (fnhe->fnhe_gw) {
@@ -645,7 +646,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
645} 646}
646 647
647static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, 648static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
648 u32 pmtu, unsigned long expires) 649 u32 pmtu, bool lock, unsigned long expires)
649{ 650{
650 struct fnhe_hash_bucket *hash; 651 struct fnhe_hash_bucket *hash;
651 struct fib_nh_exception *fnhe; 652 struct fib_nh_exception *fnhe;
@@ -682,8 +683,10 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
682 fnhe->fnhe_genid = genid; 683 fnhe->fnhe_genid = genid;
683 if (gw) 684 if (gw)
684 fnhe->fnhe_gw = gw; 685 fnhe->fnhe_gw = gw;
685 if (pmtu) 686 if (pmtu) {
686 fnhe->fnhe_pmtu = pmtu; 687 fnhe->fnhe_pmtu = pmtu;
688 fnhe->fnhe_mtu_locked = lock;
689 }
687 fnhe->fnhe_expires = max(1UL, expires); 690 fnhe->fnhe_expires = max(1UL, expires);
688 /* Update all cached dsts too */ 691 /* Update all cached dsts too */
689 rt = rcu_dereference(fnhe->fnhe_rth_input); 692 rt = rcu_dereference(fnhe->fnhe_rth_input);
@@ -707,6 +710,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
707 fnhe->fnhe_daddr = daddr; 710 fnhe->fnhe_daddr = daddr;
708 fnhe->fnhe_gw = gw; 711 fnhe->fnhe_gw = gw;
709 fnhe->fnhe_pmtu = pmtu; 712 fnhe->fnhe_pmtu = pmtu;
713 fnhe->fnhe_mtu_locked = lock;
710 fnhe->fnhe_expires = expires; 714 fnhe->fnhe_expires = expires;
711 715
712 /* Exception created; mark the cached routes for the nexthop 716 /* Exception created; mark the cached routes for the nexthop
@@ -788,7 +792,8 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
788 struct fib_nh *nh = &FIB_RES_NH(res); 792 struct fib_nh *nh = &FIB_RES_NH(res);
789 793
790 update_or_create_fnhe(nh, fl4->daddr, new_gw, 794 update_or_create_fnhe(nh, fl4->daddr, new_gw,
791 0, jiffies + ip_rt_gc_timeout); 795 0, false,
796 jiffies + ip_rt_gc_timeout);
792 } 797 }
793 if (kill_route) 798 if (kill_route)
794 rt->dst.obsolete = DST_OBSOLETE_KILL; 799 rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1010,15 +1015,18 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1010{ 1015{
1011 struct dst_entry *dst = &rt->dst; 1016 struct dst_entry *dst = &rt->dst;
1012 struct fib_result res; 1017 struct fib_result res;
1018 bool lock = false;
1013 1019
1014 if (dst_metric_locked(dst, RTAX_MTU)) 1020 if (ip_mtu_locked(dst))
1015 return; 1021 return;
1016 1022
1017 if (ipv4_mtu(dst) < mtu) 1023 if (ipv4_mtu(dst) < mtu)
1018 return; 1024 return;
1019 1025
1020 if (mtu < ip_rt_min_pmtu) 1026 if (mtu < ip_rt_min_pmtu) {
1027 lock = true;
1021 mtu = ip_rt_min_pmtu; 1028 mtu = ip_rt_min_pmtu;
1029 }
1022 1030
1023 if (rt->rt_pmtu == mtu && 1031 if (rt->rt_pmtu == mtu &&
1024 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2)) 1032 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
@@ -1028,7 +1036,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1028 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { 1036 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
1029 struct fib_nh *nh = &FIB_RES_NH(res); 1037 struct fib_nh *nh = &FIB_RES_NH(res);
1030 1038
1031 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, 1039 update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
1032 jiffies + ip_rt_mtu_expires); 1040 jiffies + ip_rt_mtu_expires);
1033 } 1041 }
1034 rcu_read_unlock(); 1042 rcu_read_unlock();
@@ -1281,7 +1289,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1281 1289
1282 mtu = READ_ONCE(dst->dev->mtu); 1290 mtu = READ_ONCE(dst->dev->mtu);
1283 1291
1284 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { 1292 if (unlikely(ip_mtu_locked(dst))) {
1285 if (rt->rt_uses_gateway && mtu > 576) 1293 if (rt->rt_uses_gateway && mtu > 576)
1286 mtu = 576; 1294 mtu = 576;
1287 } 1295 }
@@ -1394,7 +1402,7 @@ struct uncached_list {
1394 1402
1395static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); 1403static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1396 1404
1397static void rt_add_uncached_list(struct rtable *rt) 1405void rt_add_uncached_list(struct rtable *rt)
1398{ 1406{
1399 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list); 1407 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1400 1408
@@ -1405,14 +1413,8 @@ static void rt_add_uncached_list(struct rtable *rt)
1405 spin_unlock_bh(&ul->lock); 1413 spin_unlock_bh(&ul->lock);
1406} 1414}
1407 1415
1408static void ipv4_dst_destroy(struct dst_entry *dst) 1416void rt_del_uncached_list(struct rtable *rt)
1409{ 1417{
1410 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1411 struct rtable *rt = (struct rtable *) dst;
1412
1413 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
1414 kfree(p);
1415
1416 if (!list_empty(&rt->rt_uncached)) { 1418 if (!list_empty(&rt->rt_uncached)) {
1417 struct uncached_list *ul = rt->rt_uncached_list; 1419 struct uncached_list *ul = rt->rt_uncached_list;
1418 1420
@@ -1422,6 +1424,17 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
1422 } 1424 }
1423} 1425}
1424 1426
1427static void ipv4_dst_destroy(struct dst_entry *dst)
1428{
1429 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1430 struct rtable *rt = (struct rtable *)dst;
1431
1432 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
1433 kfree(p);
1434
1435 rt_del_uncached_list(rt);
1436}
1437
1425void rt_flush_dev(struct net_device *dev) 1438void rt_flush_dev(struct net_device *dev)
1426{ 1439{
1427 struct net *net = dev_net(dev); 1440 struct net *net = dev_net(dev);
@@ -1517,6 +1530,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
1517 rt->rt_is_input = 0; 1530 rt->rt_is_input = 0;
1518 rt->rt_iif = 0; 1531 rt->rt_iif = 0;
1519 rt->rt_pmtu = 0; 1532 rt->rt_pmtu = 0;
1533 rt->rt_mtu_locked = 0;
1520 rt->rt_gateway = 0; 1534 rt->rt_gateway = 0;
1521 rt->rt_uses_gateway = 0; 1535 rt->rt_uses_gateway = 0;
1522 INIT_LIST_HEAD(&rt->rt_uncached); 1536 INIT_LIST_HEAD(&rt->rt_uncached);
@@ -2533,6 +2547,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2533 rt->rt_is_input = ort->rt_is_input; 2547 rt->rt_is_input = ort->rt_is_input;
2534 rt->rt_iif = ort->rt_iif; 2548 rt->rt_iif = ort->rt_iif;
2535 rt->rt_pmtu = ort->rt_pmtu; 2549 rt->rt_pmtu = ort->rt_pmtu;
2550 rt->rt_mtu_locked = ort->rt_mtu_locked;
2536 2551
2537 rt->rt_genid = rt_genid_ipv4(net); 2552 rt->rt_genid = rt_genid_ipv4(net);
2538 rt->rt_flags = ort->rt_flags; 2553 rt->rt_flags = ort->rt_flags;
@@ -2635,6 +2650,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2635 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); 2650 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2636 if (rt->rt_pmtu && expires) 2651 if (rt->rt_pmtu && expires)
2637 metrics[RTAX_MTU - 1] = rt->rt_pmtu; 2652 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2653 if (rt->rt_mtu_locked && expires)
2654 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2638 if (rtnetlink_put_metrics(skb, metrics) < 0) 2655 if (rtnetlink_put_metrics(skb, metrics) < 0)
2639 goto nla_put_failure; 2656 goto nla_put_failure;
2640 2657
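The ipv4 route hunks above thread a new rt_mtu_locked / fnhe_mtu_locked bit through route allocation, fnhe updates, blackhole copies and the netlink dump. It is set in exactly one place: when a PMTU report below ip_rt_min_pmtu is clamped up to the floor, the result is marked locked so that later code (ipv4_mtu() via the new ip_mtu_locked() test) treats it like an administratively locked MTU rather than a genuine path MTU. The clamp in isolation (552 as the floor is an assumption about the ip_rt_min_pmtu default):

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_PMTU 552    /* stand-in for ip_rt_min_pmtu */

    struct pmtu { unsigned int mtu; bool locked; };

    /* Mirrors __ip_rt_update_pmtu() above: an implausibly small
     * report is raised to the floor and remembered as locked. */
    static void update_pmtu(struct pmtu *p, unsigned int reported)
    {
        bool lock = false;

        if (reported < MIN_PMTU) {
            lock = true;
            reported = MIN_PMTU;
        }
        p->mtu = reported;
        p->locked = lock;
    }

    int main(void)
    {
        struct pmtu p;

        update_pmtu(&p, 68);
        printf("mtu=%u locked=%d\n", p.mtu, p.locked);  /* mtu=552 locked=1 */
        return 0;
    }
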
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d763fae1b574..0c31be306572 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3545,6 +3545,7 @@ int tcp_abort(struct sock *sk, int err)
3545 3545
3546 bh_unlock_sock(sk); 3546 bh_unlock_sock(sk);
3547 local_bh_enable(); 3547 local_bh_enable();
3548 tcp_write_queue_purge(sk);
3548 release_sock(sk); 3549 release_sock(sk);
3549 return 0; 3550 return 0;
3550} 3551}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 71fc60f1b326..f7d944855f8e 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -34,6 +34,7 @@ static void tcp_write_err(struct sock *sk)
34 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; 34 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
35 sk->sk_error_report(sk); 35 sk->sk_error_report(sk);
36 36
37 tcp_write_queue_purge(sk);
37 tcp_done(sk); 38 tcp_done(sk);
38 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); 39 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
39} 40}
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 63faeee989a9..2a9764bd1719 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol; 95 if (skb->mac_len)
96 eth_hdr(skb)->h_proto = skb->protocol;
96 97
97 err = 0; 98 err = 0;
98 99
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 0c752dc3f93b..6c76a757fa4a 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -100,7 +100,9 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
100 xdst->u.rt.rt_gateway = rt->rt_gateway; 100 xdst->u.rt.rt_gateway = rt->rt_gateway;
101 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway; 101 xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
102 xdst->u.rt.rt_pmtu = rt->rt_pmtu; 102 xdst->u.rt.rt_pmtu = rt->rt_pmtu;
103 xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;
103 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached); 104 INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
105 rt_add_uncached_list(&xdst->u.rt);
104 106
105 return 0; 107 return 0;
106} 108}
@@ -240,7 +242,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
240 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 242 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
241 243
242 dst_destroy_metrics_generic(dst); 244 dst_destroy_metrics_generic(dst);
243 245 if (xdst->u.rt.rt_uncached_list)
246 rt_del_uncached_list(&xdst->u.rt);
244 xfrm_dst_destroy(xdst); 247 xfrm_dst_destroy(xdst);
245} 248}
246 249
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b27333d7b099..88bc2ef7c7a8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -146,10 +146,12 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
146 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 146 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
147 struct inet_sock *inet = inet_sk(sk); 147 struct inet_sock *inet = inet_sk(sk);
148 struct ipv6_pinfo *np = inet6_sk(sk); 148 struct ipv6_pinfo *np = inet6_sk(sk);
149 struct in6_addr *daddr; 149 struct in6_addr *daddr, old_daddr;
150 __be32 fl6_flowlabel = 0;
151 __be32 old_fl6_flowlabel;
152 __be16 old_dport;
150 int addr_type; 153 int addr_type;
151 int err; 154 int err;
152 __be32 fl6_flowlabel = 0;
153 155
154 if (usin->sin6_family == AF_INET) { 156 if (usin->sin6_family == AF_INET) {
155 if (__ipv6_only_sock(sk)) 157 if (__ipv6_only_sock(sk))
@@ -238,9 +240,13 @@ ipv4_connected:
238 } 240 }
239 } 241 }
240 242
243 /* save the current peer information before updating it */
244 old_daddr = sk->sk_v6_daddr;
245 old_fl6_flowlabel = np->flow_label;
246 old_dport = inet->inet_dport;
247
241 sk->sk_v6_daddr = *daddr; 248 sk->sk_v6_daddr = *daddr;
242 np->flow_label = fl6_flowlabel; 249 np->flow_label = fl6_flowlabel;
243
244 inet->inet_dport = usin->sin6_port; 250 inet->inet_dport = usin->sin6_port;
245 251
246 /* 252 /*
@@ -250,11 +256,12 @@ ipv4_connected:
250 256
251 err = ip6_datagram_dst_update(sk, true); 257 err = ip6_datagram_dst_update(sk, true);
252 if (err) { 258 if (err) {
253 /* Reset daddr and dport so that udp_v6_early_demux() 259 /* Restore the socket peer info, to keep it consistent with
254 * fails to find this socket 260 * the old socket state
255 */ 261 */
256 memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); 262 sk->sk_v6_daddr = old_daddr;
257 inet->inet_dport = 0; 263 np->flow_label = old_fl6_flowlabel;
264 inet->inet_dport = old_dport;
258 goto out; 265 goto out;
259 } 266 }
260 267
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6adbcf40cf8c..3a98c694da5f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -126,7 +126,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
126 struct ip6_tnl *t, *cand = NULL; 126 struct ip6_tnl *t, *cand = NULL;
127 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 127 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
128 int dev_type = (gre_proto == htons(ETH_P_TEB) || 128 int dev_type = (gre_proto == htons(ETH_P_TEB) ||
129 gre_proto == htons(ETH_P_ERSPAN)) ? 129 gre_proto == htons(ETH_P_ERSPAN) ||
130 gre_proto == htons(ETH_P_ERSPAN2)) ?
130 ARPHRD_ETHER : ARPHRD_IP6GRE; 131 ARPHRD_ETHER : ARPHRD_IP6GRE;
131 int score, cand_score = 4; 132 int score, cand_score = 4;
132 133
@@ -905,6 +906,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
905 truncate = true; 906 truncate = true;
906 } 907 }
907 908
909 if (skb_cow_head(skb, dev->needed_headroom))
910 goto tx_err;
911
908 t->parms.o_flags &= ~TUNNEL_KEY; 912 t->parms.o_flags &= ~TUNNEL_KEY;
909 IPCB(skb)->flags = 0; 913 IPCB(skb)->flags = 0;
910 914
@@ -947,6 +951,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
947 md->u.md2.dir, 951 md->u.md2.dir,
948 get_hwid(&md->u.md2), 952 get_hwid(&md->u.md2),
949 truncate, false); 953 truncate, false);
954 } else {
955 goto tx_err;
950 } 956 }
951 } else { 957 } else {
952 switch (skb->protocol) { 958 switch (skb->protocol) {
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 10024eb0c521..d1d0b2fa7a07 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1554,7 +1554,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
1554 *(opt++) = (rd_len >> 3); 1554 *(opt++) = (rd_len >> 3);
1555 opt += 6; 1555 opt += 6;
1556 1556
1557 memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8); 1557 skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
1558 rd_len - 8);
1558} 1559}
1559 1560
1560void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) 1561void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 939d122e71b4..a2ed9fdd58d4 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,7 +128,7 @@ struct uncached_list {
128 128
129static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); 129static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
130 130
131static void rt6_uncached_list_add(struct rt6_info *rt) 131void rt6_uncached_list_add(struct rt6_info *rt)
132{ 132{
133 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); 133 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
134 134
@@ -139,7 +139,7 @@ static void rt6_uncached_list_add(struct rt6_info *rt)
139 spin_unlock_bh(&ul->lock); 139 spin_unlock_bh(&ul->lock);
140} 140}
141 141
142static void rt6_uncached_list_del(struct rt6_info *rt) 142void rt6_uncached_list_del(struct rt6_info *rt)
143{ 143{
144 if (!list_empty(&rt->rt6i_uncached)) { 144 if (!list_empty(&rt->rt6i_uncached)) {
145 struct uncached_list *ul = rt->rt6i_uncached_list; 145 struct uncached_list *ul = rt->rt6i_uncached_list;
@@ -1514,7 +1514,30 @@ static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
1514 } 1514 }
1515} 1515}
1516 1516
1517static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu) 1517static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1518 struct rt6_info *rt, int mtu)
1519{
1520 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1521 * lowest MTU in the path: always allow updating the route PMTU to
1522 * reflect PMTU decreases.
1523 *
1524 * If the new MTU is higher, and the route PMTU is equal to the local
1525 * MTU, this means the old MTU is the lowest in the path, so allow
1526 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1527 * handle this.
1528 */
1529
1530 if (dst_mtu(&rt->dst) >= mtu)
1531 return true;
1532
1533 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1534 return true;
1535
1536 return false;
1537}
1538
1539static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1540 struct rt6_info *rt, int mtu)
1518{ 1541{
1519 struct rt6_exception_bucket *bucket; 1542 struct rt6_exception_bucket *bucket;
1520 struct rt6_exception *rt6_ex; 1543 struct rt6_exception *rt6_ex;
@@ -1523,20 +1546,22 @@ static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
1523 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1546 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1524 lockdep_is_held(&rt6_exception_lock)); 1547 lockdep_is_held(&rt6_exception_lock));
1525 1548
1526 if (bucket) { 1549 if (!bucket)
1527 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { 1550 return;
1528 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { 1551
1529 struct rt6_info *entry = rt6_ex->rt6i; 1552 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1530 /* For RTF_CACHE with rt6i_pmtu == 0 1553 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1531 * (i.e. a redirected route), 1554 struct rt6_info *entry = rt6_ex->rt6i;
1532 * the metrics of its rt->dst.from has already 1555
1533 * been updated. 1556 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
1534 */ 1557 * route), the metrics of its rt->dst.from have already
1535 if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu) 1558 * been updated.
1536 entry->rt6i_pmtu = mtu; 1559 */
1537 } 1560 if (entry->rt6i_pmtu &&
1538 bucket++; 1561 rt6_mtu_change_route_allowed(idev, entry, mtu))
1562 entry->rt6i_pmtu = mtu;
1539 } 1563 }
1564 bucket++;
1540 } 1565 }
1541} 1566}
1542 1567
@@ -3899,25 +3924,13 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
3899 Since RFC 1981 doesn't include administrative MTU increase 3924 Since RFC 1981 doesn't include administrative MTU increase
3900 update PMTU increase is a MUST. (i.e. jumbo frame) 3925 update PMTU increase is a MUST. (i.e. jumbo frame)
3901 */ 3926 */
3902 /*
3903 If new MTU is less than route PMTU, this new MTU will be the
3904 lowest MTU in the path, update the route PMTU to reflect PMTU
3905 decreases; if new MTU is greater than route PMTU, and the
3906 old MTU is the lowest MTU in the path, update the route PMTU
3907 to reflect the increase. In this case if the other nodes' MTU
3908 also have the lowest MTU, TOO BIG MESSAGE will be lead to
3909 PMTU discovery.
3910 */
3911 if (rt->dst.dev == arg->dev && 3927 if (rt->dst.dev == arg->dev &&
3912 dst_metric_raw(&rt->dst, RTAX_MTU) &&
3913 !dst_metric_locked(&rt->dst, RTAX_MTU)) { 3928 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
3914 spin_lock_bh(&rt6_exception_lock); 3929 spin_lock_bh(&rt6_exception_lock);
3915 if (dst_mtu(&rt->dst) >= arg->mtu || 3930 if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
3916 (dst_mtu(&rt->dst) < arg->mtu && 3931 rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
3917 dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
3918 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); 3932 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
3919 } 3933 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
3920 rt6_exceptions_update_pmtu(rt, arg->mtu);
3921 spin_unlock_bh(&rt6_exception_lock); 3934 spin_unlock_bh(&rt6_exception_lock);
3922 } 3935 }
3923 return 0; 3936 return 0;
@@ -4189,6 +4202,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
4189 r_cfg.fc_encap_type = nla_get_u16(nla); 4202 r_cfg.fc_encap_type = nla_get_u16(nla);
4190 } 4203 }
4191 4204
4205 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
4192 rt = ip6_route_info_create(&r_cfg, extack); 4206 rt = ip6_route_info_create(&r_cfg, extack);
4193 if (IS_ERR(rt)) { 4207 if (IS_ERR(rt)) {
4194 err = PTR_ERR(rt); 4208 err = PTR_ERR(rt);
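rt6_mtu_change_route_allowed() above turns the old inline condition (and its long comment) into a named predicate: a device MTU change may lower a route's PMTU unconditionally, but may raise it only when the route PMTU equals the local device MTU, i.e. when this link was the path bottleneck; otherwise some remote hop is, and PMTU discovery must stay in charge. The same predicate also now applies to the cached exception routes. Restated standalone:

    #include <stdbool.h>
    #include <stdio.h>

    /* Standalone restatement of rt6_mtu_change_route_allowed(). */
    static bool mtu_update_allowed(unsigned int route_pmtu,
                                   unsigned int dev_mtu6,
                                   unsigned int new_mtu)
    {
        if (route_pmtu >= new_mtu)      /* decrease: always allowed */
            return true;
        if (route_pmtu == dev_mtu6)     /* local link was the bottleneck */
            return true;
        return false;
    }

    int main(void)
    {
        printf("%d\n", mtu_update_allowed(1500, 1500, 9000));   /* 1 */
        printf("%d\n", mtu_update_allowed(1280, 1500, 9000));   /* 0 */
        return 0;
    }
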
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index bd6cc688bd19..7a78dcfda68a 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -93,7 +93,8 @@ static void set_tun_src(struct net *net, struct net_device *dev,
93/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */ 93/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
94int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) 94int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
95{ 95{
96 struct net *net = dev_net(skb_dst(skb)->dev); 96 struct dst_entry *dst = skb_dst(skb);
97 struct net *net = dev_net(dst->dev);
97 struct ipv6hdr *hdr, *inner_hdr; 98 struct ipv6hdr *hdr, *inner_hdr;
98 struct ipv6_sr_hdr *isrh; 99 struct ipv6_sr_hdr *isrh;
99 int hdrlen, tot_len, err; 100 int hdrlen, tot_len, err;
@@ -134,7 +135,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
134 isrh->nexthdr = proto; 135 isrh->nexthdr = proto;
135 136
136 hdr->daddr = isrh->segments[isrh->first_segment]; 137 hdr->daddr = isrh->segments[isrh->first_segment];
137 set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr); 138 set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
138 139
139#ifdef CONFIG_IPV6_SEG6_HMAC 140#ifdef CONFIG_IPV6_SEG6_HMAC
140 if (sr_has_hmac(isrh)) { 141 if (sr_has_hmac(isrh)) {
@@ -418,7 +419,7 @@ static int seg6_build_state(struct nlattr *nla,
418 419
419 slwt = seg6_lwt_lwtunnel(newts); 420 slwt = seg6_lwt_lwtunnel(newts);
420 421
421 err = dst_cache_init(&slwt->cache, GFP_KERNEL); 422 err = dst_cache_init(&slwt->cache, GFP_ATOMIC);
422 if (err) { 423 if (err) {
423 kfree(newts); 424 kfree(newts);
424 return err; 425 return err;
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index bb935a3b7fea..de1b0b8c53b0 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -92,7 +92,8 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
92 92
93 skb_reset_network_header(skb); 93 skb_reset_network_header(skb);
94 skb_mac_header_rebuild(skb); 94 skb_mac_header_rebuild(skb);
95 eth_hdr(skb)->h_proto = skb->protocol; 95 if (skb->mac_len)
96 eth_hdr(skb)->h_proto = skb->protocol;
96 97
97 err = 0; 98 err = 0;
98 99
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 88cd0c90fa81..cbb270bd81b0 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -113,6 +113,9 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
113 xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway; 113 xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
114 xdst->u.rt6.rt6i_dst = rt->rt6i_dst; 114 xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
115 xdst->u.rt6.rt6i_src = rt->rt6i_src; 115 xdst->u.rt6.rt6i_src = rt->rt6i_src;
116 INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
117 rt6_uncached_list_add(&xdst->u.rt6);
118 atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
116 119
117 return 0; 120 return 0;
118} 121}
@@ -244,6 +247,8 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
244 if (likely(xdst->u.rt6.rt6i_idev)) 247 if (likely(xdst->u.rt6.rt6i_idev))
245 in6_dev_put(xdst->u.rt6.rt6i_idev); 248 in6_dev_put(xdst->u.rt6.rt6i_idev);
246 dst_destroy_metrics_generic(dst); 249 dst_destroy_metrics_generic(dst);
250 if (xdst->u.rt6.rt6i_uncached_list)
251 rt6_uncached_list_del(&xdst->u.rt6);
247 xfrm_dst_destroy(xdst); 252 xfrm_dst_destroy(xdst);
248} 253}
249 254
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 81ce15ffb878..893a022f9620 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -2432,9 +2432,11 @@ static int afiucv_iucv_init(void)
2432 af_iucv_dev->driver = &af_iucv_driver; 2432 af_iucv_dev->driver = &af_iucv_driver;
2433 err = device_register(af_iucv_dev); 2433 err = device_register(af_iucv_dev);
2434 if (err) 2434 if (err)
2435 goto out_driver; 2435 goto out_iucv_dev;
2436 return 0; 2436 return 0;
2437 2437
2438out_iucv_dev:
2439 put_device(af_iucv_dev);
2438out_driver: 2440out_driver:
2439 driver_unregister(&af_iucv_driver); 2441 driver_unregister(&af_iucv_driver);
2440out_iucv: 2442out_iucv:
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index a6cd0712e063..516cfad71b85 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1381,24 +1381,32 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1381 .parse_msg = kcm_parse_func_strparser, 1381 .parse_msg = kcm_parse_func_strparser,
1382 .read_sock_done = kcm_read_sock_done, 1382 .read_sock_done = kcm_read_sock_done,
1383 }; 1383 };
1384 int err; 1384 int err = 0;
1385 1385
1386 csk = csock->sk; 1386 csk = csock->sk;
1387 if (!csk) 1387 if (!csk)
1388 return -EINVAL; 1388 return -EINVAL;
1389 1389
1390 lock_sock(csk);
1391
1390 /* Only allow TCP sockets to be attached for now */ 1392 /* Only allow TCP sockets to be attached for now */
1391 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || 1393 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
1392 csk->sk_protocol != IPPROTO_TCP) 1394 csk->sk_protocol != IPPROTO_TCP) {
1393 return -EOPNOTSUPP; 1395 err = -EOPNOTSUPP;
1396 goto out;
1397 }
1394 1398
1395 /* Don't allow listeners or closed sockets */ 1399 /* Don't allow listeners or closed sockets */
1396 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) 1400 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
1397 return -EOPNOTSUPP; 1401 err = -EOPNOTSUPP;
1402 goto out;
1403 }
1398 1404
1399 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL); 1405 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1400 if (!psock) 1406 if (!psock) {
1401 return -ENOMEM; 1407 err = -ENOMEM;
1408 goto out;
1409 }
1402 1410
1403 psock->mux = mux; 1411 psock->mux = mux;
1404 psock->sk = csk; 1412 psock->sk = csk;
@@ -1407,7 +1415,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1407 err = strp_init(&psock->strp, csk, &cb); 1415 err = strp_init(&psock->strp, csk, &cb);
1408 if (err) { 1416 if (err) {
1409 kmem_cache_free(kcm_psockp, psock); 1417 kmem_cache_free(kcm_psockp, psock);
1410 return err; 1418 goto out;
1411 } 1419 }
1412 1420
1413 write_lock_bh(&csk->sk_callback_lock); 1421 write_lock_bh(&csk->sk_callback_lock);
@@ -1420,7 +1428,8 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1420 strp_stop(&psock->strp); 1428 strp_stop(&psock->strp);
1421 strp_done(&psock->strp); 1429 strp_done(&psock->strp);
1422 kmem_cache_free(kcm_psockp, psock); 1430 kmem_cache_free(kcm_psockp, psock);
1423 return -EALREADY; 1431 err = -EALREADY;
1432 goto out;
1424 } 1433 }
1425 1434
1426 psock->save_data_ready = csk->sk_data_ready; 1435 psock->save_data_ready = csk->sk_data_ready;
@@ -1456,7 +1465,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1456 /* Schedule RX work in case there are already bytes queued */ 1465 /* Schedule RX work in case there are already bytes queued */
1457 strp_check_rcv(&psock->strp); 1466 strp_check_rcv(&psock->strp);
1458 1467
1459 return 0; 1468out:
1469 release_sock(csk);
1470
1471 return err;
1460} 1472}
1461 1473
1462static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info) 1474static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
@@ -1508,6 +1520,7 @@ static void kcm_unattach(struct kcm_psock *psock)
1508 1520
1509 if (WARN_ON(psock->rx_kcm)) { 1521 if (WARN_ON(psock->rx_kcm)) {
1510 write_unlock_bh(&csk->sk_callback_lock); 1522 write_unlock_bh(&csk->sk_callback_lock);
1523 release_sock(csk);
1511 return; 1524 return;
1512 } 1525 }
1513 1526
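kcm_attach() above now takes lock_sock(csk) for the whole attach and converts every early return into goto out, so the lock is released on each path and the family/state checks happen while the state cannot change underneath; kcm_unattach() gains a matching release_sock() on its WARN path, apparently pairing with a lock_sock outside the visible context. The single-exit shape, with a pthread mutex standing in for the socket lock:

    #include <pthread.h>
    #include <stdio.h>

    struct conn { pthread_mutex_t lock; int established; };

    /* Single-exit locking as adopted above: take the lock once, check
     * state under it, leave through one label that always unlocks. */
    static int attach(struct conn *c)
    {
        int err = 0;

        pthread_mutex_lock(&c->lock);

        if (!c->established) {
            err = -1;
            goto out;
        }
        /* ... perform the attach while the state is stable ... */
    out:
        pthread_mutex_unlock(&c->lock);
        return err;
    }

    int main(void)
    {
        struct conn c = { PTHREAD_MUTEX_INITIALIZER, 1 };

        printf("%d\n", attach(&c));
        return 0;
    }
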
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 189a12a5e4ac..b86868da50d4 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -111,6 +111,13 @@ struct l2tp_net {
111 spinlock_t l2tp_session_hlist_lock; 111 spinlock_t l2tp_session_hlist_lock;
112}; 112};
113 113
114#if IS_ENABLED(CONFIG_IPV6)
115static bool l2tp_sk_is_v6(struct sock *sk)
116{
117 return sk->sk_family == PF_INET6 &&
118 !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
119}
120#endif
114 121
115static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) 122static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
116{ 123{
@@ -1049,7 +1056,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1049 /* Queue the packet to IP for output */ 1056 /* Queue the packet to IP for output */
1050 skb->ignore_df = 1; 1057 skb->ignore_df = 1;
1051#if IS_ENABLED(CONFIG_IPV6) 1058#if IS_ENABLED(CONFIG_IPV6)
1052 if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped) 1059 if (l2tp_sk_is_v6(tunnel->sock))
1053 error = inet6_csk_xmit(tunnel->sock, skb, NULL); 1060 error = inet6_csk_xmit(tunnel->sock, skb, NULL);
1054 else 1061 else
1055#endif 1062#endif
@@ -1112,6 +1119,15 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1112 goto out_unlock; 1119 goto out_unlock;
1113 } 1120 }
1114 1121
1122 /* The user-space may change the connection status for the user-space
1123 * provided socket at run time: we must check it under the socket lock
1124 */
1125 if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1126 kfree_skb(skb);
1127 ret = NET_XMIT_DROP;
1128 goto out_unlock;
1129 }
1130
1115 /* Get routing info from the tunnel socket */ 1131 /* Get routing info from the tunnel socket */
1116 skb_dst_drop(skb); 1132 skb_dst_drop(skb);
1117 skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); 1133 skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
@@ -1131,7 +1147,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1131 1147
1132 /* Calculate UDP checksum if configured to do so */ 1148 /* Calculate UDP checksum if configured to do so */
1133#if IS_ENABLED(CONFIG_IPV6) 1149#if IS_ENABLED(CONFIG_IPV6)
1134 if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) 1150 if (l2tp_sk_is_v6(sk))
1135 udp6_set_csum(udp_get_no_check6_tx(sk), 1151 udp6_set_csum(udp_get_no_check6_tx(sk),
1136 skb, &inet6_sk(sk)->saddr, 1152 skb, &inet6_sk(sk)->saddr,
1137 &sk->sk_v6_daddr, udp_len); 1153 &sk->sk_v6_daddr, udp_len);
@@ -1457,9 +1473,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1457 encap = cfg->encap; 1473 encap = cfg->encap;
1458 1474
1459 /* Quick sanity checks */ 1475 /* Quick sanity checks */
1476 err = -EPROTONOSUPPORT;
1477 if (sk->sk_type != SOCK_DGRAM) {
1478 pr_debug("tunl %hu: fd %d wrong socket type\n",
1479 tunnel_id, fd);
1480 goto err;
1481 }
1460 switch (encap) { 1482 switch (encap) {
1461 case L2TP_ENCAPTYPE_UDP: 1483 case L2TP_ENCAPTYPE_UDP:
1462 err = -EPROTONOSUPPORT;
1463 if (sk->sk_protocol != IPPROTO_UDP) { 1484 if (sk->sk_protocol != IPPROTO_UDP) {
1464 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1485 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1465 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); 1486 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
@@ -1467,7 +1488,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1467 } 1488 }
1468 break; 1489 break;
1469 case L2TP_ENCAPTYPE_IP: 1490 case L2TP_ENCAPTYPE_IP:
1470 err = -EPROTONOSUPPORT;
1471 if (sk->sk_protocol != IPPROTO_L2TP) { 1491 if (sk->sk_protocol != IPPROTO_L2TP) {
1472 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", 1492 pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1473 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); 1493 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
@@ -1507,24 +1527,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1507 if (cfg != NULL) 1527 if (cfg != NULL)
1508 tunnel->debug = cfg->debug; 1528 tunnel->debug = cfg->debug;
1509 1529
1510#if IS_ENABLED(CONFIG_IPV6)
1511 if (sk->sk_family == PF_INET6) {
1512 struct ipv6_pinfo *np = inet6_sk(sk);
1513
1514 if (ipv6_addr_v4mapped(&np->saddr) &&
1515 ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
1516 struct inet_sock *inet = inet_sk(sk);
1517
1518 tunnel->v4mapped = true;
1519 inet->inet_saddr = np->saddr.s6_addr32[3];
1520 inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
1521 inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
1522 } else {
1523 tunnel->v4mapped = false;
1524 }
1525 }
1526#endif
1527
1528 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1530 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1529 tunnel->encap = encap; 1531 tunnel->encap = encap;
1530 if (encap == L2TP_ENCAPTYPE_UDP) { 1532 if (encap == L2TP_ENCAPTYPE_UDP) {
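The l2tp hunks above drop the tunnel->v4mapped flag cached at creation time (the header removal follows in the next hunk) in favour of l2tp_sk_is_v6(), evaluated at transmit time, and add checks that a userspace-provided socket is still a connected SOCK_DGRAM socket. The point, per the in-diff comment, is that userspace can change the socket's state and addressing at run time, so nothing about it may be trusted from creation time. A userspace version of the v4-mapped test:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Analogue of l2tp_sk_is_v6(): an AF_INET6 socket whose peer is a
     * v4-mapped address really transmits over IPv4, so it does not
     * count as v6 for checksum and xmit purposes. */
    static bool peer_is_v6(int family, const struct in6_addr *daddr)
    {
        return family == AF_INET6 && !IN6_IS_ADDR_V4MAPPED(daddr);
    }

    int main(void)
    {
        struct in6_addr a;

        inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
        printf("%d\n", peer_is_v6(AF_INET6, &a));   /* 0: v4-mapped */
        inet_pton(AF_INET6, "2001:db8::1", &a);
        printf("%d\n", peer_is_v6(AF_INET6, &a));   /* 1 */
        return 0;
    }
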
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a1aa9550f04e..2718d0b284d0 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -188,9 +188,6 @@ struct l2tp_tunnel {
188 struct sock *sock; /* Parent socket */ 188 struct sock *sock; /* Parent socket */
189 int fd; /* Parent fd, if tunnel socket 189 int fd; /* Parent fd, if tunnel socket
190 * was created by userspace */ 190 * was created by userspace */
191#if IS_ENABLED(CONFIG_IPV6)
192 bool v4mapped;
193#endif
194 191
195 struct work_struct del_work; 192 struct work_struct del_work;
196 193
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index a75653affbf7..b5adf3625d16 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -213,6 +213,7 @@ static const char *hw_flag_names[] = {
213 FLAG(SUPPORTS_TX_FRAG), 213 FLAG(SUPPORTS_TX_FRAG),
214 FLAG(SUPPORTS_TDLS_BUFFER_STA), 214 FLAG(SUPPORTS_TDLS_BUFFER_STA),
215 FLAG(DEAUTH_NEED_MGD_TX_PREP), 215 FLAG(DEAUTH_NEED_MGD_TX_PREP),
216 FLAG(DOESNT_SUPPORT_QOS_NDP),
216#undef FLAG 217#undef FLAG
217}; 218};
218 219
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0024eff9bb84..fe4aefb06d9f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -897,7 +897,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
897 struct ieee80211_hdr_3addr *nullfunc; 897 struct ieee80211_hdr_3addr *nullfunc;
898 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 898 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
899 899
900 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true); 900 skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif,
901 !ieee80211_hw_check(&local->hw, DOESNT_SUPPORT_QOS_NDP));
901 if (!skb) 902 if (!skb)
902 return; 903 return;
903 904
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 8e19c86d1aa6..fd13d28e4ca7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5423,6 +5423,7 @@ err:
5423static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) 5423static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
5424{ 5424{
5425 cancel_delayed_work_sync(&flowtable->data.gc_work); 5425 cancel_delayed_work_sync(&flowtable->data.gc_work);
5426 kfree(flowtable->ops);
5426 kfree(flowtable->name); 5427 kfree(flowtable->name);
5427 flowtable->data.type->free(&flowtable->data); 5428 flowtable->data.type->free(&flowtable->data);
5428 rhashtable_destroy(&flowtable->data.rhashtable); 5429 rhashtable_destroy(&flowtable->data.rhashtable);
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 3f1624ee056f..d40591fe1b2f 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -674,7 +674,7 @@ static const struct nft_set_ops *
674nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc, 674nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
675 u32 flags) 675 u32 flags)
676{ 676{
677 if (desc->size) { 677 if (desc->size && !(flags & NFT_SET_TIMEOUT)) {
678 switch (desc->klen) { 678 switch (desc->klen) {
679 case 4: 679 case 4:
680 return &nft_hash_fast_ops; 680 return &nft_hash_fast_ops;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index d9deebe599ec..6de1f6a4cb80 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -423,6 +423,36 @@ textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
423 return buf; 423 return buf;
424} 424}
425 425
426/**
427 * xt_check_proc_name - check that name is suitable for /proc file creation
428 *
429 * @name: file name candidate
430 * @size: length of buffer
431 *
 432 * Some x_tables modules wish to create a file in /proc.
 433 * This function makes sure that the name is suitable for this
 434 * purpose: it checks that the name is NUL-terminated and isn't a 'special'
 435 * name, like "..".
 436 *
 437 * Returns a negative number on error or 0 if the name is usable.
438 */
439int xt_check_proc_name(const char *name, unsigned int size)
440{
441 if (name[0] == '\0')
442 return -EINVAL;
443
444 if (strnlen(name, size) == size)
445 return -ENAMETOOLONG;
446
447 if (strcmp(name, ".") == 0 ||
448 strcmp(name, "..") == 0 ||
449 strchr(name, '/'))
450 return -EINVAL;
451
452 return 0;
453}
454EXPORT_SYMBOL(xt_check_proc_name);
455
426int xt_check_match(struct xt_mtchk_param *par, 456int xt_check_match(struct xt_mtchk_param *par,
427 unsigned int size, u_int8_t proto, bool inv_proto) 457 unsigned int size, u_int8_t proto, bool inv_proto)
428{ 458{
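xt_check_proc_name() above is the generic replacement for the per-module name checks that the xt_hashlimit and xt_recent hunks below switch over to; beyond the old NUL-termination test it rejects empty names, names without a NUL inside the buffer, and the /proc-hostile ".", ".." and anything containing '/'. A userspace copy with a few probes (kernel errno constants replaced by plain numbers for the demo):

    #include <stdio.h>
    #include <string.h>

    /* Userspace copy of xt_check_proc_name() above. */
    static int check_proc_name(const char *name, unsigned int size)
    {
        if (name[0] == '\0')
            return -1;          /* -EINVAL */
        if (strnlen(name, size) == size)
            return -2;          /* -ENAMETOOLONG: no NUL in the buffer */
        if (!strcmp(name, ".") || !strcmp(name, "..") || strchr(name, '/'))
            return -1;          /* special names and path separators */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_proc_name("mylist", 16)); /*  0 */
        printf("%d\n", check_proc_name("..", 16));     /* -1 */
        printf("%d\n", check_proc_name("a/b", 16));    /* -1 */
        return 0;
    }
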
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index db2fe0911740..ef65b7a9173e 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -917,8 +917,9 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
 	struct hashlimit_cfg3 cfg = {};
 	int ret;
 
-	if (info->name[sizeof(info->name) - 1] != '\0')
-		return -EINVAL;
+	ret = xt_check_proc_name(info->name, sizeof(info->name));
+	if (ret)
+		return ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
 
@@ -935,8 +936,9 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
 	struct hashlimit_cfg3 cfg = {};
 	int ret;
 
-	if (info->name[sizeof(info->name) - 1] != '\0')
-		return -EINVAL;
+	ret = xt_check_proc_name(info->name, sizeof(info->name));
+	if (ret)
+		return ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
 
@@ -950,9 +952,11 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
 static int hashlimit_mt_check(const struct xt_mtchk_param *par)
 {
 	struct xt_hashlimit_mtinfo3 *info = par->matchinfo;
+	int ret;
 
-	if (info->name[sizeof(info->name) - 1] != '\0')
-		return -EINVAL;
+	ret = xt_check_proc_name(info->name, sizeof(info->name));
+	if (ret)
+		return ret;
 
 	return hashlimit_mt_check_common(par, &info->hinfo, &info->cfg,
 					 info->name, 3);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 19efdb757944..486dd24da78b 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -361,9 +361,9 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
 			  info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
 		return -EINVAL;
 	}
-	if (info->name[0] == '\0' ||
-	    strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
-		return -EINVAL;
+	ret = xt_check_proc_name(info->name, sizeof(info->name));
+	if (ret)
+		return ret;
 
 	if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
 		nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index a6f63a5faee7..af51b8c0a2cb 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1107,7 +1107,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
 	if (!err)
 		delivered = true;
 	else if (err != -ESRCH)
-		goto error;
+		return err;
 	return delivered ? 0 : -ESRCH;
 error:
 	kfree_skb(skb);
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 04b94281a30b..b891a91577f8 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -242,14 +242,20 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
 
 		band->type = nla_get_u32(attr[OVS_BAND_ATTR_TYPE]);
 		band->rate = nla_get_u32(attr[OVS_BAND_ATTR_RATE]);
+		if (band->rate == 0) {
+			err = -EINVAL;
+			goto exit_free_meter;
+		}
+
 		band->burst_size = nla_get_u32(attr[OVS_BAND_ATTR_BURST]);
 		/* Figure out max delta_t that is enough to fill any bucket.
 		 * Keep max_delta_t size to the bucket units:
 		 * pkts => 1/1000 packets, kilobits => bits.
+		 *
+		 * Start with a full bucket.
 		 */
-		band_max_delta_t = (band->burst_size + band->rate) * 1000;
-		/* Start with a full bucket. */
-		band->bucket = band_max_delta_t;
+		band->bucket = (band->burst_size + band->rate) * 1000;
+		band_max_delta_t = band->bucket / band->rate;
 		if (band_max_delta_t > meter->max_delta_t)
 			meter->max_delta_t = band_max_delta_t;
 		band++;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index da72e0cf2b1f..5cb9b268e8ff 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -352,7 +352,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 	return res;
 out:
 	if (res == ACT_P_CREATED)
-		tcf_idr_cleanup(*act, est);
+		tcf_idr_release(*act, bind);
 
 	return ret;
 }
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 1fb1f1f6a555..a527e287c086 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -350,7 +350,7 @@ static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
 {
 	struct sctphdr *sctph;
 
-	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)
+	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
 		return 1;
 
 	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
@@ -626,7 +626,8 @@ static void tcf_csum_cleanup(struct tc_action *a)
 	struct tcf_csum_params *params;
 
 	params = rcu_dereference_protected(p->params, 1);
-	kfree_rcu(params, rcu);
+	if (params)
+		kfree_rcu(params, rcu);
 }
 
 static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 10866717f88e..b5e8565b89c7 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -80,9 +80,12 @@ static void ipt_destroy_target(struct xt_entry_target *t)
 static void tcf_ipt_release(struct tc_action *a)
 {
 	struct tcf_ipt *ipt = to_ipt(a);
-	ipt_destroy_target(ipt->tcfi_t);
+
+	if (ipt->tcfi_t) {
+		ipt_destroy_target(ipt->tcfi_t);
+		kfree(ipt->tcfi_t);
+	}
 	kfree(ipt->tcfi_tname);
-	kfree(ipt->tcfi_t);
 }
 
 static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
@@ -187,7 +190,7 @@ err2:
 	kfree(tname);
 err1:
 	if (ret == ACT_P_CREATED)
-		tcf_idr_cleanup(*a, est);
+		tcf_idr_release(*a, bind);
 	return err;
 }
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 5e8cc8f63acd..f392ccaaa0d8 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -176,7 +176,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	p = to_pedit(*a);
 	keys = kmalloc(ksize, GFP_KERNEL);
 	if (keys == NULL) {
-		tcf_idr_cleanup(*a, est);
+		tcf_idr_release(*a, bind);
 		kfree(keys_ex);
 		return -ENOMEM;
 	}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 51fe4fe343f7..7081ec75e696 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -196,7 +196,7 @@ failure:
 	qdisc_put_rtab(P_tab);
 	qdisc_put_rtab(R_tab);
 	if (ret == ACT_P_CREATED)
-		tcf_idr_cleanup(*a, est);
+		tcf_idr_release(*a, bind);
 	return err;
 }
 
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 238dfd27e995..3a89f98f17e6 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -103,7 +103,8 @@ static void tcf_sample_cleanup(struct tc_action *a)
 
 	psample_group = rtnl_dereference(s->psample_group);
 	RCU_INIT_POINTER(s->psample_group, NULL);
-	psample_group_put(psample_group);
+	if (psample_group)
+		psample_group_put(psample_group);
 }
 
 static bool tcf_sample_dev_ok_push(struct net_device *dev)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 91816d73f3f3..e84768ae610a 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -121,7 +121,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
 	d = to_defact(*a);
 	ret = alloc_defdata(d, defdata);
 	if (ret < 0) {
-		tcf_idr_cleanup(*a, est);
+		tcf_idr_release(*a, bind);
 		return ret;
 	}
 	d->tcf_action = parm->action;
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index febec75f4f7a..142a996ac776 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -152,7 +152,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 	ASSERT_RTNL();
 	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
 	if (unlikely(!p)) {
-		if (ovr)
+		if (ret == ACT_P_CREATED)
 			tcf_idr_release(*a, bind);
 		return -ENOMEM;
 	}
@@ -190,7 +190,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a)
 	struct tcf_skbmod_params *p;
 
 	p = rcu_dereference_protected(d->skbmod_p, 1);
-	kfree_rcu(p, rcu);
+	if (p)
+		kfree_rcu(p, rcu);
 }
 
 static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 9169b7e78ada..a1c8dd406a04 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -153,6 +153,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
 		break;
 	default:
+		ret = -EINVAL;
 		goto err_out;
 	}
 
158 159
@@ -207,11 +208,12 @@ static void tunnel_key_release(struct tc_action *a)
 	struct tcf_tunnel_key_params *params;
 
 	params = rcu_dereference_protected(t->params, 1);
+	if (params) {
+		if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+			dst_release(&params->tcft_enc_metadata->dst);
 
-	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
-		dst_release(&params->tcft_enc_metadata->dst);
-
-	kfree_rcu(params, rcu);
+		kfree_rcu(params, rcu);
+	}
 }
 
 static int tunnel_key_dump_addresses(struct sk_buff *skb,
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index c2ee7fd51cc9..4595391c2129 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -195,7 +195,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 	ASSERT_RTNL();
 	p = kzalloc(sizeof(*p), GFP_KERNEL);
 	if (!p) {
-		if (ovr)
+		if (ret == ACT_P_CREATED)
 			tcf_idr_release(*a, bind);
 		return -ENOMEM;
 	}
@@ -225,7 +225,8 @@ static void tcf_vlan_cleanup(struct tc_action *a)
 	struct tcf_vlan_params *p;
 
 	p = rcu_dereference_protected(v->vlan_p, 1);
-	kfree_rcu(p, rcu);
+	if (p)
+		kfree_rcu(p, rcu);
 }
 
 static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 190570f21b20..7e3fbe9cc936 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -106,6 +106,14 @@ static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
 
 	__skb_queue_tail(&q->skb_bad_txq, skb);
 
+	if (qdisc_is_percpu_stats(q)) {
+		qdisc_qstats_cpu_backlog_inc(q, skb);
+		qdisc_qstats_cpu_qlen_inc(q);
+	} else {
+		qdisc_qstats_backlog_inc(q, skb);
+		q->q.qlen++;
+	}
+
 	if (lock)
 		spin_unlock(lock);
 }
@@ -196,14 +204,6 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 			break;
 		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
 			qdisc_enqueue_skb_bad_txq(q, nskb);
-
-			if (qdisc_is_percpu_stats(q)) {
-				qdisc_qstats_cpu_backlog_inc(q, nskb);
-				qdisc_qstats_cpu_qlen_inc(q);
-			} else {
-				qdisc_qstats_backlog_inc(q, nskb);
-				q->q.qlen++;
-			}
 			break;
 		}
 		skb->next = nskb;
@@ -628,6 +628,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 	int band = prio2band[skb->priority & TC_PRIO_MAX];
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	struct skb_array *q = band2list(priv, band);
+	unsigned int pkt_len = qdisc_pkt_len(skb);
 	int err;
 
 	err = skb_array_produce(q, skb);
@@ -636,7 +637,10 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 		return qdisc_drop_cpu(skb, qdisc, to_free);
 
 	qdisc_qstats_cpu_qlen_inc(qdisc);
-	qdisc_qstats_cpu_backlog_inc(qdisc, skb);
+	/* Note: skb can not be used after skb_array_produce(),
+	 * so we better not use qdisc_qstats_cpu_backlog_inc()
+	 */
+	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
 	return NET_XMIT_SUCCESS;
 }
 
642 646
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7c179addebcd..7d6801fc5340 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -509,7 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
 	if (unlikely(sch->q.qlen >= sch->limit))
-		return qdisc_drop(skb, sch, to_free);
+		return qdisc_drop_all(skb, sch, to_free);
 
 	qdisc_qstats_backlog_inc(sch, skb);
 
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 0247cc432e02..b381d78548ac 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -106,6 +106,7 @@ int sctp_rcv(struct sk_buff *skb)
 	int family;
 	struct sctp_af *af;
 	struct net *net = dev_net(skb->dev);
+	bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);
 
 	if (skb->pkt_type != PACKET_HOST)
 		goto discard_it;
@@ -123,8 +124,7 @@ int sctp_rcv(struct sk_buff *skb)
 	 * it's better to just linearize it otherwise crc computing
 	 * takes longer.
 	 */
-	if ((!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
-	     skb_linearize(skb)) ||
+	if ((!is_gso && skb_linearize(skb)) ||
 	    !pskb_may_pull(skb, sizeof(struct sctphdr)))
 		goto discard_it;
 
@@ -135,7 +135,7 @@ int sctp_rcv(struct sk_buff *skb)
 	if (skb_csum_unnecessary(skb))
 		__skb_decr_checksum_unnecessary(skb);
 	else if (!sctp_checksum_disable &&
-		 !(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) &&
+		 !is_gso &&
 		 sctp_rcv_checksum(net, skb) < 0)
 		goto discard_it;
 	skb->csum_valid = 1;
@@ -1218,7 +1218,7 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
 	 * issue as packets hitting this are mostly INIT or INIT-ACK and
 	 * those cannot be on GSO-style anyway.
 	 */
-	if ((skb_shinfo(skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP)
+	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
 		return NULL;
 
 	ch = (struct sctp_chunkhdr *)skb->data;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 48392552ee7c..23ebc5318edc 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -170,7 +170,7 @@ next_chunk:
 
 	chunk = list_entry(entry, struct sctp_chunk, list);
 
-	if ((skb_shinfo(chunk->skb)->gso_type & SKB_GSO_SCTP) == SKB_GSO_SCTP) {
+	if (skb_is_gso(chunk->skb) && skb_is_gso_sctp(chunk->skb)) {
 		/* GSO-marked skbs but without frags, handle
 		 * them normally
 		 */
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 35bc7106d182..123e9f2dc226 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -45,7 +45,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct sctphdr *sh;
 
-	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
+	if (!skb_is_gso_sctp(skb))
 		goto out;
 
 	sh = sctp_hdr(skb);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 86913eb5cfa0..5f8046c62d90 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -983,10 +983,6 @@ out:
 		lsmc->clcsock = NULL;
 	}
 	release_sock(lsk);
-	/* no more listening, wake up smc_close_wait_listen_clcsock and
-	 * accept
-	 */
-	lsk->sk_state_change(lsk);
 	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
 }
 
992 988
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index e339c0186dcf..fa41d9881741 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -30,27 +30,6 @@ static void smc_close_cleanup_listen(struct sock *parent)
 		smc_close_non_accepted(sk);
 }
 
-static void smc_close_wait_listen_clcsock(struct smc_sock *smc)
-{
-	DEFINE_WAIT_FUNC(wait, woken_wake_function);
-	struct sock *sk = &smc->sk;
-	signed long timeout;
-
-	timeout = SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME;
-	add_wait_queue(sk_sleep(sk), &wait);
-	do {
-		release_sock(sk);
-		if (smc->clcsock)
-			timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE,
-					     timeout);
-		sched_annotate_sleep();
-		lock_sock(sk);
-		if (!smc->clcsock)
-			break;
-	} while (timeout);
-	remove_wait_queue(sk_sleep(sk), &wait);
-}
-
 /* wait for sndbuf data being transmitted */
 static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
 {
@@ -204,9 +183,11 @@ again:
 			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
 			/* wake up kernel_accept of smc_tcp_listen_worker */
 			smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
-			smc_close_wait_listen_clcsock(smc);
 		}
 		smc_close_cleanup_listen(sk);
+		release_sock(sk);
+		flush_work(&smc->tcp_listen_work);
+		lock_sock(sk);
 		break;
 	case SMC_ACTIVE:
 		smc_close_stream_wait(smc, timeout);
diff --git a/net/socket.c b/net/socket.c
index d9a1ac233b35..3d1948d27a25 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2590,6 +2590,11 @@ void sock_unregister(int family)
 }
 EXPORT_SYMBOL(sock_unregister);
 
+bool sock_is_registered(int family)
+{
+	return family < NPROTO && rcu_access_pointer(net_families[family]);
+}
+
 static int __init sock_init(void)
 {
 	int err;
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index ccfdc7115a83..a00ec715aa46 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
 		struct crypto_comp *tfm;
 
 		/* This can be any valid CPU ID so we don't need locking. */
-		tfm = __this_cpu_read(*pos->tfms);
+		tfm = this_cpu_read(*pos->tfms);
 
 		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 77d9d1ab05ce..cb3bb9ae4407 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1458,10 +1458,13 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
 static int xfrm_get_tos(const struct flowi *fl, int family)
 {
 	const struct xfrm_policy_afinfo *afinfo;
-	int tos = 0;
+	int tos;
 
 	afinfo = xfrm_policy_get_afinfo(family);
-	tos = afinfo ? afinfo->get_tos(fl) : 0;
+	if (!afinfo)
+		return 0;
+
+	tos = afinfo->get_tos(fl);
 
 	rcu_read_unlock();
 
@@ -1891,7 +1894,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
 	spin_unlock(&pq->hold_queue.lock);
 
 	dst_hold(xfrm_dst_path(dst));
-	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0);
+	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
 	if (IS_ERR(dst))
 		goto purge_queue;
 
@@ -2729,14 +2732,14 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
 	while (dst->xfrm) {
 		const struct xfrm_state *xfrm = dst->xfrm;
 
+		dst = xfrm_dst_child(dst);
+
 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
 			continue;
 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
 			daddr = xfrm->coaddr;
 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
 			daddr = &xfrm->id.daddr;
-
-		dst = xfrm_dst_child(dst);
 	}
 	return daddr;
 }
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 1d38c6acf8af..9e3a5e85f828 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -660,7 +660,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
 	} else {
 		XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
 		XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
-		xo->seq.low = oseq = oseq + 1;
+		xo->seq.low = oseq + 1;
 		xo->seq.hi = oseq_hi;
 		oseq += skb_shinfo(skb)->gso_segs;
 	}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 54e21f19d722..f9d2f2233f09 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2056,6 +2056,11 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
 	struct xfrm_mgr *km;
 	struct xfrm_policy *pol = NULL;
 
+#ifdef CONFIG_COMPAT
+	if (in_compat_syscall())
+		return -EOPNOTSUPP;
+#endif
+
 	if (!optval && !optlen) {
 		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
 		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index aff2e84ec761..e92b8c019c88 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
 	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
 	struct xfrm_replay_state_esn *rs;
 
-	if (p->flags & XFRM_STATE_ESN) {
-		if (!rt)
-			return -EINVAL;
+	if (!rt)
+		return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
 
 	rs = nla_data(rt);
 
 	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
 		return -EINVAL;
-
-	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
-	    nla_len(rt) != sizeof(*rs))
-		return -EINVAL;
-	}
 
-	if (!rt)
-		return 0;
+	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
+	    nla_len(rt) != sizeof(*rs))
+		return -EINVAL;
 
 	/* As only ESP and AH support ESN feature. */
 	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 5589bae34af6..a6f538b31ad6 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -297,11 +297,11 @@ cmd_dt_S_dtb= \
 	echo '\#include <asm-generic/vmlinux.lds.h>'; \
 	echo '.section .dtb.init.rodata,"a"'; \
 	echo '.balign STRUCT_ALIGNMENT'; \
-	echo '.global __dtb_$(*F)_begin'; \
-	echo '__dtb_$(*F)_begin:'; \
+	echo '.global __dtb_$(subst -,_,$(*F))_begin'; \
+	echo '__dtb_$(subst -,_,$(*F))_begin:'; \
 	echo '.incbin "$<" '; \
-	echo '__dtb_$(*F)_end:'; \
-	echo '.global __dtb_$(*F)_end'; \
+	echo '__dtb_$(subst -,_,$(*F))_end:'; \
+	echo '.global __dtb_$(subst -,_,$(*F))_end'; \
 	echo '.balign STRUCT_ALIGNMENT'; \
 ) > $@
 
307 307
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index fa3d39b6f23b..449b68c4c90c 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -93,14 +93,6 @@
  * (Note: it'd be easy to port over the complete mkdep state machine,
  * but I don't think the added complexity is worth it)
  */
-/*
- * Note 2: if somebody writes HELLO_CONFIG_BOOM in a file, it will depend onto
- * CONFIG_BOOM. This could seem a bug (not too hard to fix), but please do not
- * fix it! Some UserModeLinux files (look at arch/um/) call CONFIG_BOOM as
- * UML_CONFIG_BOOM, to avoid conflicts with /usr/include/linux/autoconf.h,
- * through arch/um/include/uml-config.h; this fixdep "bug" makes sure that
- * those files will have correct dependencies.
- */
 
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -233,8 +225,13 @@ static int str_ends_with(const char *s, int slen, const char *sub)
 static void parse_config_file(const char *p)
 {
 	const char *q, *r;
+	const char *start = p;
 
 	while ((p = strstr(p, "CONFIG_"))) {
+		if (p > start && (isalnum(p[-1]) || p[-1] == '_')) {
+			p += 7;
+			continue;
+		}
 		p += 7;
 		q = p;
 		while (*q && (isalnum(*q) || *q == '_'))
@@ -286,8 +283,6 @@ static int is_ignored_file(const char *s, int len)
 {
 	return str_ends_with(s, len, "include/generated/autoconf.h") ||
 	       str_ends_with(s, len, "include/generated/autoksyms.h") ||
-	       str_ends_with(s, len, "arch/um/include/uml-config.h") ||
-	       str_ends_with(s, len, "include/linux/kconfig.h") ||
 	       str_ends_with(s, len, ".ver");
 }
 
293 288
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
index 94b664817ad9..d84a5674e95e 100755
--- a/scripts/bloat-o-meter
+++ b/scripts/bloat-o-meter
@@ -15,7 +15,7 @@ signal(SIGPIPE, SIG_DFL)
 if len(sys.argv) < 3:
     sys.stderr.write("usage: %s [option] file1 file2\n" % sys.argv[0])
     sys.stderr.write("The options are:\n")
-    sys.stderr.write("-c cateogrize output based on symbole type\n")
+    sys.stderr.write("-c categorize output based on symbol type\n")
     sys.stderr.write("-d Show delta of Data Section\n")
     sys.stderr.write("-t Show delta of text Section\n")
     sys.exit(-1)
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index b044c0a5a674..02298c9c6020 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1762,10 +1762,9 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
 		return -ENOMEM;
 	_snd_pcm_hw_params_any(params);
 	err = snd_pcm_hw_refine(substream, params);
-	format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
-	kfree(params);
 	if (err < 0)
-		return err;
+		goto error;
+	format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
 	for (fmt = 0; fmt < 32; ++fmt) {
 		if (snd_mask_test(format_mask, fmt)) {
 			int f = snd_pcm_oss_format_to(fmt);
@@ -1773,7 +1772,10 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
 				formats |= f;
 		}
 	}
-	return formats;
+
+ error:
+	kfree(params);
+	return err < 0 ? err : formats;
 }
 
 static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 04d4db44fae5..61a07fe34cd2 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -255,12 +255,12 @@ static int seq_free_client1(struct snd_seq_client *client)
 
 	if (!client)
 		return 0;
-	snd_seq_delete_all_ports(client);
-	snd_seq_queue_client_leave(client->number);
 	spin_lock_irqsave(&clients_lock, flags);
 	clienttablock[client->number] = 1;
 	clienttab[client->number] = NULL;
 	spin_unlock_irqrestore(&clients_lock, flags);
+	snd_seq_delete_all_ports(client);
+	snd_seq_queue_client_leave(client->number);
 	snd_use_lock_sync(&client->use_lock);
 	snd_seq_queue_client_termination(client->number);
 	if (client->pool)
@@ -910,7 +910,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
 static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
 					struct snd_seq_event *event,
 					struct file *file, int blocking,
-					int atomic, int hop)
+					int atomic, int hop,
+					struct mutex *mutexp)
 {
 	struct snd_seq_event_cell *cell;
 	int err;
@@ -948,7 +949,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
 		return -ENXIO; /* queue is not allocated */
 
 	/* allocate an event cell */
-	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
+	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
+				file, mutexp);
 	if (err < 0)
 		return err;
 
@@ -1017,12 +1019,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 		return -ENXIO;
 
 	/* allocate the pool now if the pool is not allocated yet */
+	mutex_lock(&client->ioctl_mutex);
 	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
-		mutex_lock(&client->ioctl_mutex);
 		err = snd_seq_pool_init(client->pool);
-		mutex_unlock(&client->ioctl_mutex);
 		if (err < 0)
-			return -ENOMEM;
+			goto out;
 	}
 
 	/* only process whole events */
@@ -1073,7 +1074,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 		/* ok, enqueue it */
 		err = snd_seq_client_enqueue_event(client, &event, file,
 						   !(file->f_flags & O_NONBLOCK),
-						   0, 0);
+						   0, 0, &client->ioctl_mutex);
 		if (err < 0)
 			break;
 
@@ -1084,6 +1085,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
 		written += len;
 	}
 
+ out:
+	mutex_unlock(&client->ioctl_mutex);
 	return written ? written : err;
 }
 
1089 1092
@@ -1838,9 +1841,11 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
1838 (! snd_seq_write_pool_allocated(client) || 1841 (! snd_seq_write_pool_allocated(client) ||
1839 info->output_pool != client->pool->size)) { 1842 info->output_pool != client->pool->size)) {
1840 if (snd_seq_write_pool_allocated(client)) { 1843 if (snd_seq_write_pool_allocated(client)) {
1844 /* is the pool in use? */
1845 if (atomic_read(&client->pool->counter))
1846 return -EBUSY;
1841 /* remove all existing cells */ 1847 /* remove all existing cells */
1842 snd_seq_pool_mark_closing(client->pool); 1848 snd_seq_pool_mark_closing(client->pool);
1843 snd_seq_queue_client_leave_cells(client->number);
1844 snd_seq_pool_done(client->pool); 1849 snd_seq_pool_done(client->pool);
1845 } 1850 }
1846 client->pool->size = info->output_pool; 1851 client->pool->size = info->output_pool;
@@ -2260,7 +2265,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
 	if (! cptr->accept_output)
 		result = -EPERM;
 	else /* send it */
-		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
+		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
+						      atomic, hop, NULL);
 
 	snd_seq_client_unlock(cptr);
 	return result;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index a8c2822e0198..72c0302a55d2 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -125,7 +125,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
 		return -EINVAL;
 
 	snd_use_lock_use(&f->use_lock);
-	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
+	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
 	if (err < 0) {
 		if ((err == -ENOMEM) || (err == -EAGAIN))
 			atomic_inc(&f->overflow);
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index f763682584a8..ab1112e90f88 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -220,7 +220,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
  */
 static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
 			      struct snd_seq_event_cell **cellp,
-			      int nonblock, struct file *file)
+			      int nonblock, struct file *file,
+			      struct mutex *mutexp)
 {
 	struct snd_seq_event_cell *cell;
 	unsigned long flags;
@@ -244,7 +245,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
 		set_current_state(TASK_INTERRUPTIBLE);
 		add_wait_queue(&pool->output_sleep, &wait);
 		spin_unlock_irq(&pool->lock);
+		if (mutexp)
+			mutex_unlock(mutexp);
 		schedule();
+		if (mutexp)
+			mutex_lock(mutexp);
 		spin_lock_irq(&pool->lock);
 		remove_wait_queue(&pool->output_sleep, &wait);
 		/* interrupted? */
@@ -287,7 +292,7 @@ __error:
  */
 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
 		      struct snd_seq_event_cell **cellp, int nonblock,
-		      struct file *file)
+		      struct file *file, struct mutex *mutexp)
 {
 	int ncells, err;
 	unsigned int extlen;
@@ -304,7 +309,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
 	if (ncells >= pool->total_elements)
 		return -ENOMEM;
 
-	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
+	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
 	if (err < 0)
 		return err;
 
@@ -330,7 +335,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
 			int size = sizeof(struct snd_seq_event);
 			if (len < size)
 				size = len;
-			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
+			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
+						 mutexp);
 			if (err < 0)
 				goto __error;
 			if (cell->event.data.ext.ptr == NULL)
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 32f959c17786..3abe306c394a 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -66,7 +66,8 @@ struct snd_seq_pool {
 void snd_seq_cell_free(struct snd_seq_event_cell *cell);
 
 int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
-		      struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
+		      struct snd_seq_event_cell **cellp, int nonblock,
+		      struct file *file, struct mutex *mutexp);
 
 /* return number of unused (free) cells */
 static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c
index bc1c8488fc2a..2bc6759e4adc 100644
--- a/sound/core/seq/seq_prioq.c
+++ b/sound/core/seq/seq_prioq.c
@@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo)
 	if (f->cells > 0) {
 		/* drain prioQ */
 		while (f->cells > 0)
-			snd_seq_cell_free(snd_seq_prioq_cell_out(f));
+			snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL));
 	}
 
 	kfree(f);
@@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f,
 	return 0;
 }
 
+/* return 1 if the current time >= event timestamp */
+static int event_is_ready(struct snd_seq_event *ev, void *current_time)
+{
+	if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK)
+		return snd_seq_compare_tick_time(current_time, &ev->time.tick);
+	else
+		return snd_seq_compare_real_time(current_time, &ev->time.time);
+}
+
 /* dequeue cell from prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
+struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
+						  void *current_time)
 {
 	struct snd_seq_event_cell *cell;
 	unsigned long flags;
@@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
 	spin_lock_irqsave(&f->lock, flags);
 
 	cell = f->head;
+	if (cell && current_time && !event_is_ready(&cell->event, current_time))
+		cell = NULL;
 	if (cell) {
 		f->head = cell->next;
 
@@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f)
 	return f->cells;
 }
 
-
-/* peek at cell at the head of the prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f)
-{
-	if (f == NULL) {
-		pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n");
-		return NULL;
-	}
-	return f->head;
-}
-
-
 static inline int prioq_match(struct snd_seq_event_cell *cell,
 			      int client, int timestamp)
 {
diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h
index d38bb78d9345..2c315ca10fc4 100644
--- a/sound/core/seq/seq_prioq.h
+++ b/sound/core/seq/seq_prioq.h
@@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo);
 int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell);
 
 /* dequeue cell from prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f);
+struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
+						  void *current_time);
 
 /* return number of events available in prioq */
 int snd_seq_prioq_avail(struct snd_seq_prioq *f);
 
-/* peek at cell at the head of the prioq */
-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq *f);
-
 /* client left queue */
 void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp);
 
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 0428e9061b47..b377f5048352 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -277,30 +277,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
 
 __again:
 	/* Process tick queue... */
-	while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) {
-		if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick,
-					      &cell->event.time.tick)) {
-			cell = snd_seq_prioq_cell_out(q->tickq);
-			if (cell)
-				snd_seq_dispatch_event(cell, atomic, hop);
-		} else {
-			/* event remains in the queue */
+	for (;;) {
+		cell = snd_seq_prioq_cell_out(q->tickq,
+					      &q->timer->tick.cur_tick);
+		if (!cell)
 			break;
-		}
+		snd_seq_dispatch_event(cell, atomic, hop);
 	}
 
-
 	/* Process time queue... */
-	while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) {
-		if (snd_seq_compare_real_time(&q->timer->cur_time,
-					      &cell->event.time.time)) {
-			cell = snd_seq_prioq_cell_out(q->timeq);
-			if (cell)
-				snd_seq_dispatch_event(cell, atomic, hop);
-		} else {
-			/* event remains in the queue */
+	for (;;) {
+		cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
+		if (!cell)
 			break;
-		}
+		snd_seq_dispatch_event(cell, atomic, hop);
 	}
 
 	/* free lock */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 96143df19b21..d5017adf9feb 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -181,11 +181,15 @@ static const struct kernel_param_ops param_ops_xint = {
 };
 #define param_check_xint param_check_int
 
-static int power_save = -1;
+static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
 module_param(power_save, xint, 0644);
 MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
 		 "(in second, 0 = disable).");
 
+static bool pm_blacklist = true;
+module_param(pm_blacklist, bool, 0644);
+MODULE_PARM_DESC(pm_blacklist, "Enable power-management blacklist");
+
 /* reset the HD-audio controller in power save mode.
  * this may give more power-saving, but will take longer time to
  * wake up.
@@ -2300,10 +2304,9 @@ static int azx_probe_continue(struct azx *chip)
 
 	val = power_save;
 #ifdef CONFIG_PM
-	if (val == -1) {
+	if (pm_blacklist) {
 		const struct snd_pci_quirk *q;
 
-		val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
 		q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
 		if (q && val) {
 			dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 37e1cf8218ff..5b4dbcec6de8 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -957,6 +957,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
 	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
 	SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+	SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+	SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
 	SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
 	SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b9c93fa0a51c..9af301c6bba2 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5274,6 +5274,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
5274 } 5274 }
5275} 5275}
5276 5276
5277/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
5278static void alc295_fixup_disable_dac3(struct hda_codec *codec,
5279 const struct hda_fixup *fix, int action)
5280{
5281 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
5282 hda_nid_t conn[2] = { 0x02, 0x03 };
5283 snd_hda_override_conn_list(codec, 0x17, 2, conn);
5284 }
5285}
5286
5277/* Hook to update amp GPIO4 for automute */ 5287/* Hook to update amp GPIO4 for automute */
5278static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, 5288static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
5279 struct hda_jack_callback *jack) 5289 struct hda_jack_callback *jack)
@@ -5466,6 +5476,7 @@ enum {
 	ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
 	ALC255_FIXUP_DELL_SPK_NOISE,
 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+	ALC295_FIXUP_DISABLE_DAC3,
 	ALC280_FIXUP_HP_HEADSET_MIC,
 	ALC221_FIXUP_HP_FRONT_MIC,
 	ALC292_FIXUP_TPT460,
@@ -5480,10 +5491,12 @@ enum {
 	ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE,
 	ALC233_FIXUP_LENOVO_MULTI_CODECS,
 	ALC294_FIXUP_LENOVO_MIC_LOCATION,
+	ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
 	ALC700_FIXUP_INTEL_REFERENCE,
 	ALC274_FIXUP_DELL_BIND_DACS,
 	ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
 	ALC298_FIXUP_TPT470_DOCK,
+	ALC255_FIXUP_DUMMY_LINEOUT_VERB,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6198,6 +6211,10 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
 	},
+	[ALC295_FIXUP_DISABLE_DAC3] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc295_fixup_disable_dac3,
+	},
 	[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -6283,6 +6300,18 @@ static const struct hda_fixup alc269_fixups[] = {
 			{ }
 		},
 	},
+	[ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x16, 0x0101102f }, /* Rear Headset HP */
+			{ 0x19, 0x02a1913c }, /* use as Front headset mic, without its own jack detect */
+			{ 0x1a, 0x01a19030 }, /* Rear Headset MIC */
+			{ 0x1b, 0x02011020 },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+	},
 	[ALC700_FIXUP_INTEL_REFERENCE] = {
 		.type = HDA_FIXUP_VERBS,
 		.v.verbs = (const struct hda_verb[]) {
6287 .type = HDA_FIXUP_VERBS, 6316 .type = HDA_FIXUP_VERBS,
6288 .v.verbs = (const struct hda_verb[]) { 6317 .v.verbs = (const struct hda_verb[]) {
@@ -6319,6 +6348,15 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
 	},
+	[ALC255_FIXUP_DUMMY_LINEOUT_VERB] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x14, 0x0201101f },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6367,10 +6405,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+	SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
 	SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
+	SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -6508,9 +6549,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6508 SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK), 6549 SND_PCI_QUIRK(0x17aa, 0x2245, "Thinkpad T470", ALC298_FIXUP_TPT470_DOCK),
6509 SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6550 SND_PCI_QUIRK(0x17aa, 0x2246, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6510 SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6551 SND_PCI_QUIRK(0x17aa, 0x2247, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6552 SND_PCI_QUIRK(0x17aa, 0x2249, "Thinkpad", ALC292_FIXUP_TPT460),
6511 SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6553 SND_PCI_QUIRK(0x17aa, 0x224b, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6512 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6554 SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6513 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), 6555 SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
6556 SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
6514 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6557 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6515 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6558 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6516 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6559 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
@@ -6872,7 +6915,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6872 {0x12, 0x90a60120}, 6915 {0x12, 0x90a60120},
6873 {0x14, 0x90170110}, 6916 {0x14, 0x90170110},
6874 {0x21, 0x0321101f}), 6917 {0x21, 0x0321101f}),
6875 SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, 6918 SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
6876 {0x12, 0xb7a60130}, 6919 {0x12, 0xb7a60130},
6877 {0x14, 0x90170110}, 6920 {0x14, 0x90170110},
6878 {0x21, 0x04211020}), 6921 {0x21, 0x04211020}),
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index c33a512283a4..9fb356db3ab2 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -579,13 +579,6 @@ static int acp_init(void __iomem *acp_mmio, u32 asic_type)
579 for (bank = 1; bank < 48; bank++) 579 for (bank = 1; bank < 48; bank++)
580 acp_set_sram_bank_state(acp_mmio, bank, false); 580 acp_set_sram_bank_state(acp_mmio, bank, false);
581 } 581 }
582
583 /* Stoney supports 16bit resolution */
584 if (asic_type == CHIP_STONEY) {
585 val = acp_reg_read(acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN);
586 val |= 0x03;
587 acp_reg_write(val, acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN);
588 }
589 return 0; 582 return 0;
590} 583}
591 584
@@ -774,6 +767,7 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
774{ 767{
775 int status; 768 int status;
776 uint64_t size; 769 uint64_t size;
770 u32 val = 0;
777 struct page *pg; 771 struct page *pg;
778 struct snd_pcm_runtime *runtime; 772 struct snd_pcm_runtime *runtime;
779 struct audio_substream_data *rtd; 773 struct audio_substream_data *rtd;
@@ -786,6 +780,14 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
786 if (WARN_ON(!rtd)) 780 if (WARN_ON(!rtd))
787 return -EINVAL; 781 return -EINVAL;
788 782
783 if (adata->asic_type == CHIP_STONEY) {
784 val = acp_reg_read(adata->acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN);
785 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
786 val |= ACP_I2S_SP_16BIT_RESOLUTION_EN;
787 else
788 val |= ACP_I2S_MIC_16BIT_RESOLUTION_EN;
789 acp_reg_write(val, adata->acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN);
790 }
789 size = params_buffer_bytes(params); 791 size = params_buffer_bytes(params);
790 status = snd_pcm_lib_malloc_pages(substream, size); 792 status = snd_pcm_lib_malloc_pages(substream, size);
791 if (status < 0) 793 if (status < 0)
diff --git a/sound/soc/amd/acp.h b/sound/soc/amd/acp.h
index ecb458935d1e..9293f179f272 100644
--- a/sound/soc/amd/acp.h
+++ b/sound/soc/amd/acp.h
@@ -70,6 +70,8 @@
70#define CAPTURE_END_DMA_DESCR_CH15 7 70#define CAPTURE_END_DMA_DESCR_CH15 7
71 71
72#define mmACP_I2S_16BIT_RESOLUTION_EN 0x5209 72#define mmACP_I2S_16BIT_RESOLUTION_EN 0x5209
73#define ACP_I2S_MIC_16BIT_RESOLUTION_EN 0x01
74#define ACP_I2S_SP_16BIT_RESOLUTION_EN 0x02
73enum acp_dma_priority_level { 75enum acp_dma_priority_level {
74 /* 0x0 Specifies the DMA channel is given normal priority */ 76 /* 0x0 Specifies the DMA channel is given normal priority */
75 ACP_DMA_PRIORITY_LEVEL_NORMAL = 0x0, 77 ACP_DMA_PRIORITY_LEVEL_NORMAL = 0x0,
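The two new ACP_I2S_*_16BIT_RESOLUTION_EN bits split the Stoney 16-bit setting by direction: instead of enabling both paths once in acp_init(), acp_dma_hw_params() now sets only the bit for the stream being opened. A minimal sketch of that flow, using the register helpers visible in the hunks above (the wrapper function itself is illustrative, not part of the patch):

/* Sketch: per-direction 16-bit I2S resolution enable on Stoney.
 * acp_reg_read()/acp_reg_write() and the register/bit names come from
 * the hunks above; stoney_enable_16bit() is illustrative only. */
static void stoney_enable_16bit(void __iomem *acp_mmio, bool playback)
{
	u32 val = acp_reg_read(acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN);

	val |= playback ? ACP_I2S_SP_16BIT_RESOLUTION_EN
			: ACP_I2S_MIC_16BIT_RESOLUTION_EN;
	acp_reg_write(val, acp_mmio, mmACP_I2S_16BIT_RESOLUTION_EN);
}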
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 5672e516bec3..c1830ccd3bb8 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -798,12 +798,7 @@ static int hdmi_codec_probe(struct platform_device *pdev)
798 798
799static int hdmi_codec_remove(struct platform_device *pdev) 799static int hdmi_codec_remove(struct platform_device *pdev)
800{ 800{
801 struct device *dev = &pdev->dev; 801 snd_soc_unregister_codec(&pdev->dev);
802 struct hdmi_codec_priv *hcp;
803
804 hcp = dev_get_drvdata(dev);
805 kfree(hcp->chmap_info);
806 snd_soc_unregister_codec(dev);
807 802
808 return 0; 803 return 0;
809} 804}
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index 831b297978a4..45a73049cf64 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -1722,6 +1722,7 @@ static const struct regmap_config rt5651_regmap = {
1722 .num_reg_defaults = ARRAY_SIZE(rt5651_reg), 1722 .num_reg_defaults = ARRAY_SIZE(rt5651_reg),
1723 .ranges = rt5651_ranges, 1723 .ranges = rt5651_ranges,
1724 .num_ranges = ARRAY_SIZE(rt5651_ranges), 1724 .num_ranges = ARRAY_SIZE(rt5651_ranges),
1725 .use_single_rw = true,
1725}; 1726};
1726 1727
1727#if defined(CONFIG_OF) 1728#if defined(CONFIG_OF)
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index e1ab5537d27a..c5c76ab8ccf1 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -529,10 +529,15 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
529static int sgtl5000_digital_mute(struct snd_soc_dai *codec_dai, int mute) 529static int sgtl5000_digital_mute(struct snd_soc_dai *codec_dai, int mute)
530{ 530{
531 struct snd_soc_codec *codec = codec_dai->codec; 531 struct snd_soc_codec *codec = codec_dai->codec;
532 u16 adcdac_ctrl = SGTL5000_DAC_MUTE_LEFT | SGTL5000_DAC_MUTE_RIGHT; 532 u16 i2s_pwr = SGTL5000_I2S_IN_POWERUP;
533 533
534 snd_soc_update_bits(codec, SGTL5000_CHIP_ADCDAC_CTRL, 534 /*
535 adcdac_ctrl, mute ? adcdac_ctrl : 0); 535 * During 'digital mute' do not mute DAC
 536 * because LINE_IN would be muted as well. We want to mute
 537 * only the I2S block - this can be done by powering it off
538 */
539 snd_soc_update_bits(codec, SGTL5000_CHIP_DIG_POWER,
540 i2s_pwr, mute ? 0 : i2s_pwr);
536 541
537 return 0; 542 return 0;
538} 543}
@@ -871,15 +876,26 @@ static int sgtl5000_pcm_hw_params(struct snd_pcm_substream *substream,
871static int sgtl5000_set_bias_level(struct snd_soc_codec *codec, 876static int sgtl5000_set_bias_level(struct snd_soc_codec *codec,
872 enum snd_soc_bias_level level) 877 enum snd_soc_bias_level level)
873{ 878{
879 struct sgtl5000_priv *sgtl = snd_soc_codec_get_drvdata(codec);
880 int ret;
881
874 switch (level) { 882 switch (level) {
875 case SND_SOC_BIAS_ON: 883 case SND_SOC_BIAS_ON:
876 case SND_SOC_BIAS_PREPARE: 884 case SND_SOC_BIAS_PREPARE:
877 case SND_SOC_BIAS_STANDBY: 885 case SND_SOC_BIAS_STANDBY:
886 regcache_cache_only(sgtl->regmap, false);
887 ret = regcache_sync(sgtl->regmap);
888 if (ret) {
889 regcache_cache_only(sgtl->regmap, true);
890 return ret;
891 }
892
878 snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, 893 snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
879 SGTL5000_REFTOP_POWERUP, 894 SGTL5000_REFTOP_POWERUP,
880 SGTL5000_REFTOP_POWERUP); 895 SGTL5000_REFTOP_POWERUP);
881 break; 896 break;
882 case SND_SOC_BIAS_OFF: 897 case SND_SOC_BIAS_OFF:
898 regcache_cache_only(sgtl->regmap, true);
883 snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, 899 snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
884 SGTL5000_REFTOP_POWERUP, 0); 900 SGTL5000_REFTOP_POWERUP, 0);
885 break; 901 break;
@@ -1237,6 +1253,10 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
1237 */ 1253 */
1238 snd_soc_write(codec, SGTL5000_DAP_CTRL, 0); 1254 snd_soc_write(codec, SGTL5000_DAP_CTRL, 0);
1239 1255
1256 /* Unmute DAC after start */
1257 snd_soc_update_bits(codec, SGTL5000_CHIP_ADCDAC_CTRL,
1258 SGTL5000_DAC_MUTE_LEFT | SGTL5000_DAC_MUTE_RIGHT, 0);
1259
1240 return 0; 1260 return 0;
1241 1261
1242err: 1262err:
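Two behavioural points in the sgtl5000 hunks are easy to miss: digital mute now gates the I2S input block (muting the DAC would silence a LINE_IN path too), and the bias-level handler keeps the regmap cache coherent across power transitions. A condensed sketch of the new mute path, assuming the same codec API as the surrounding code (the helper name is illustrative):

/* Sketch of the new digital-mute behaviour: power the I2S input block
 * down instead of muting the DAC, so a LINE_IN -> HP path keeps playing. */
static int sgtl5000_mute_sketch(struct snd_soc_codec *codec, int mute)
{
	u16 i2s_pwr = SGTL5000_I2S_IN_POWERUP;

	snd_soc_update_bits(codec, SGTL5000_CHIP_DIG_POWER,
			    i2s_pwr, mute ? 0 : i2s_pwr);
	return 0;
}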
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 66e32f5d2917..989d093abda7 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1204,12 +1204,14 @@ static int wmfw_add_ctl(struct wm_adsp *dsp, struct wm_coeff_ctl *ctl)
1204 kcontrol->put = wm_coeff_put_acked; 1204 kcontrol->put = wm_coeff_put_acked;
1205 break; 1205 break;
1206 default: 1206 default:
1207 kcontrol->get = wm_coeff_get; 1207 if (kcontrol->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
1208 kcontrol->put = wm_coeff_put; 1208 ctl->bytes_ext.max = ctl->len;
1209 1209 ctl->bytes_ext.get = wm_coeff_tlv_get;
1210 ctl->bytes_ext.max = ctl->len; 1210 ctl->bytes_ext.put = wm_coeff_tlv_put;
1211 ctl->bytes_ext.get = wm_coeff_tlv_get; 1211 } else {
1212 ctl->bytes_ext.put = wm_coeff_tlv_put; 1212 kcontrol->get = wm_coeff_get;
1213 kcontrol->put = wm_coeff_put;
1214 }
1213 break; 1215 break;
1214 } 1216 }
1215 1217
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index dca1143c1150..a4aa931ebfae 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -104,7 +104,7 @@
104 104
105#define SUN8I_I2S_CHAN_CFG_REG 0x30 105#define SUN8I_I2S_CHAN_CFG_REG 0x30
106#define SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM_MASK GENMASK(6, 4) 106#define SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM_MASK GENMASK(6, 4)
107#define SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM(chan) (chan - 1) 107#define SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM(chan) ((chan - 1) << 4)
108#define SUN8I_I2S_CHAN_CFG_TX_SLOT_NUM_MASK GENMASK(2, 0) 108#define SUN8I_I2S_CHAN_CFG_TX_SLOT_NUM_MASK GENMASK(2, 0)
109#define SUN8I_I2S_CHAN_CFG_TX_SLOT_NUM(chan) (chan - 1) 109#define SUN8I_I2S_CHAN_CFG_TX_SLOT_NUM(chan) (chan - 1)
110 110
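The sun4i-i2s one-liner is a bitfield fix: SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM_MASK covers bits 6:4, so the value must be shifted into those bits; unshifted, it would be masked away and the RX slot count silently programmed as 0. A worked example for a 2-channel stream (the regmap_update_bits() application is the usual pattern, assumed here rather than shown in the hunk):

/* RX_SLOT_NUM_MASK = GENMASK(6, 4) = 0x70, so the field value must be
 * shifted by 4 to line up with the mask:
 *   old: SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM(2) = (2 - 1)      = 0x01 (lost)
 *   new: SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM(2) = (2 - 1) << 4 = 0x10 (bit 4)
 */
regmap_update_bits(i2s->regmap, SUN8I_I2S_CHAN_CFG_REG,
		   SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM_MASK,
		   SUN8I_I2S_CHAN_CFG_RX_SLOT_NUM(channels));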
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 0dfe4d3f74e2..f41079da38c5 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -213,6 +213,7 @@
213#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ 213#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
214 214
215#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ 215#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
216#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
216 217
217/* Virtualization flags: Linux defined, word 8 */ 218/* Virtualization flags: Linux defined, word 8 */
218#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 219#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 0b482c0070e0..465995281dcd 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -55,6 +55,10 @@
55 55
56#include "main.h" 56#include "main.h"
57 57
58#ifndef BPF_FS_MAGIC
59#define BPF_FS_MAGIC 0xcafe4a11
60#endif
61
58void p_err(const char *fmt, ...) 62void p_err(const char *fmt, ...)
59{ 63{
60 va_list ap; 64 va_list ap;
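The fallback #define keeps bpftool building against older UAPI headers that lack BPF_FS_MAGIC. A hedged sketch of the kind of check the constant exists for, detecting whether a path sits on a bpffs mount (the helper name and its use here are illustrative, not taken from the hunk):

#include <stdbool.h>
#include <sys/vfs.h>

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

/* Illustrative: report whether "path" lives on a bpffs mount. */
static bool is_bpffs_sketch(const char *path)
{
	struct statfs st;

	if (statfs(path, &st))
		return false;	/* path missing or statfs failed */

	return (unsigned long)st.f_type == BPF_FS_MAGIC;
}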
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 0fb5ef939732..7b26d4b0b052 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -761,6 +761,7 @@ struct kvm_ppc_resize_hpt {
761#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 761#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
762#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 762#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
763#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) 763#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
764#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
764 765
765/* 766/*
766 * Extension capability list. 767 * Extension capability list.
@@ -934,6 +935,7 @@ struct kvm_ppc_resize_hpt {
934#define KVM_CAP_S390_AIS_MIGRATION 150 935#define KVM_CAP_S390_AIS_MIGRATION 150
935#define KVM_CAP_PPC_GET_CPU_CHAR 151 936#define KVM_CAP_PPC_GET_CPU_CHAR 151
936#define KVM_CAP_S390_BPB 152 937#define KVM_CAP_S390_BPB 152
938#define KVM_CAP_GET_MSR_FEATURES 153
937 939
938#ifdef KVM_CAP_IRQ_ROUTING 940#ifdef KVM_CAP_IRQ_ROUTING
939 941
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 46c1d239cc1b..92b6a2c21631 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1116,42 +1116,29 @@ static int read_unwind_hints(struct objtool_file *file)
1116 1116
1117static int read_retpoline_hints(struct objtool_file *file) 1117static int read_retpoline_hints(struct objtool_file *file)
1118{ 1118{
1119 struct section *sec, *relasec; 1119 struct section *sec;
1120 struct instruction *insn; 1120 struct instruction *insn;
1121 struct rela *rela; 1121 struct rela *rela;
1122 int i;
1123 1122
1124 sec = find_section_by_name(file->elf, ".discard.retpoline_safe"); 1123 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
1125 if (!sec) 1124 if (!sec)
1126 return 0; 1125 return 0;
1127 1126
1128 relasec = sec->rela; 1127 list_for_each_entry(rela, &sec->rela_list, list) {
1129 if (!relasec) { 1128 if (rela->sym->type != STT_SECTION) {
1130 WARN("missing .rela.discard.retpoline_safe section"); 1129 WARN("unexpected relocation symbol type in %s", sec->name);
1131 return -1;
1132 }
1133
1134 if (sec->len % sizeof(unsigned long)) {
1135 WARN("retpoline_safe size mismatch: %d %ld", sec->len, sizeof(unsigned long));
1136 return -1;
1137 }
1138
1139 for (i = 0; i < sec->len / sizeof(unsigned long); i++) {
1140 rela = find_rela_by_dest(sec, i * sizeof(unsigned long));
1141 if (!rela) {
1142 WARN("can't find rela for retpoline_safe[%d]", i);
1143 return -1; 1130 return -1;
1144 } 1131 }
1145 1132
1146 insn = find_insn(file, rela->sym->sec, rela->addend); 1133 insn = find_insn(file, rela->sym->sec, rela->addend);
1147 if (!insn) { 1134 if (!insn) {
1148 WARN("can't find insn for retpoline_safe[%d]", i); 1135 WARN("bad .discard.retpoline_safe entry");
1149 return -1; 1136 return -1;
1150 } 1137 }
1151 1138
1152 if (insn->type != INSN_JUMP_DYNAMIC && 1139 if (insn->type != INSN_JUMP_DYNAMIC &&
1153 insn->type != INSN_CALL_DYNAMIC) { 1140 insn->type != INSN_CALL_DYNAMIC) {
1154 WARN_FUNC("retpoline_safe hint not a indirect jump/call", 1141 WARN_FUNC("retpoline_safe hint not an indirect jump/call",
1155 insn->sec, insn->offset); 1142 insn->sec, insn->offset);
1156 return -1; 1143 return -1;
1157 } 1144 }
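The rewritten read_retpoline_hints() no longer treats .discard.retpoline_safe as an array of addresses to look up entry by entry; it walks the relocation section directly, so each rela leads straight to the annotated instruction. Condensed shape of the new loop (the final marking happens just below the hunk and is assumed here from the function's purpose):

/* Condensed: one relocation per annotation, each pointing at a
 * retpoline-safe indirect jump/call to be whitelisted. */
list_for_each_entry(rela, &sec->rela_list, list) {
	insn = find_insn(file, rela->sym->sec, rela->addend);
	if (!insn)
		return -1;		/* malformed annotation */

	insn->retpoline_safe = true;	/* assumed from context below */
}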
diff --git a/tools/perf/Documentation/perf-kallsyms.txt b/tools/perf/Documentation/perf-kallsyms.txt
index 954ea9e21236..cf9f4040ea5c 100644
--- a/tools/perf/Documentation/perf-kallsyms.txt
+++ b/tools/perf/Documentation/perf-kallsyms.txt
@@ -8,7 +8,7 @@ perf-kallsyms - Searches running kernel for symbols
8SYNOPSIS 8SYNOPSIS
9-------- 9--------
10[verse] 10[verse]
11'perf kallsyms <options> symbol_name[,symbol_name...]' 11'perf kallsyms' [<options>] symbol_name[,symbol_name...]
12 12
13DESCRIPTION 13DESCRIPTION
14----------- 14-----------
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index bf4ca749d1ac..a217623fec2e 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -881,6 +881,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
881 } 881 }
882 } 882 }
883 883
884 /*
 885 * If we have just a single event and are sending data
 886 * through a pipe, we need to force id allocation,
 887 * because we synthesize the event name through the pipe
888 * and need the id for that.
889 */
890 if (data->is_pipe && rec->evlist->nr_entries == 1)
891 rec->opts.sample_id = true;
892
884 if (record__open(rec) != 0) { 893 if (record__open(rec) != 0) {
885 err = -1; 894 err = -1;
886 goto out_child; 895 goto out_child;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 98bf9d32f222..54a4c152edb3 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -917,7 +917,7 @@ static void print_metric_csv(void *ctx,
917 char buf[64], *vals, *ends; 917 char buf[64], *vals, *ends;
918 918
919 if (unit == NULL || fmt == NULL) { 919 if (unit == NULL || fmt == NULL) {
920 fprintf(out, "%s%s%s%s", csv_sep, csv_sep, csv_sep, csv_sep); 920 fprintf(out, "%s%s", csv_sep, csv_sep);
921 return; 921 return;
922 } 922 }
923 snprintf(buf, sizeof(buf), fmt, val); 923 snprintf(buf, sizeof(buf), fmt, val);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b7c823ba8374..35ac016fcb98 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -991,7 +991,7 @@ static int perf_top_overwrite_fallback(struct perf_top *top,
991 evlist__for_each_entry(evlist, counter) 991 evlist__for_each_entry(evlist, counter)
992 counter->attr.write_backward = false; 992 counter->attr.write_backward = false;
993 opts->overwrite = false; 993 opts->overwrite = false;
994 ui__warning("fall back to non-overwrite mode\n"); 994 pr_debug2("fall back to non-overwrite mode\n");
995 return 1; 995 return 1;
996} 996}
997 997
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index cfe46236a5e5..57b9b342d533 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -61,6 +61,7 @@ struct record_opts {
61 bool tail_synthesize; 61 bool tail_synthesize;
62 bool overwrite; 62 bool overwrite;
63 bool ignore_missing_thread; 63 bool ignore_missing_thread;
64 bool sample_id;
64 unsigned int freq; 65 unsigned int freq;
65 unsigned int mmap_pages; 66 unsigned int mmap_pages;
66 unsigned int auxtrace_mmap_pages; 67 unsigned int auxtrace_mmap_pages;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 286427975112..fbf927cf775d 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -327,7 +327,32 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
327 if (!disasm_line__is_valid_jump(cursor, sym)) 327 if (!disasm_line__is_valid_jump(cursor, sym))
328 return; 328 return;
329 329
330 /*
 331 * This was first seen with a gcc function, _cpp_lex_token, that
332 * has the usual jumps:
333 *
334 * │1159e6c: ↓ jne 115aa32 <_cpp_lex_token@@Base+0xf92>
335 *
336 * I.e. jumps to a label inside that function (_cpp_lex_token), and
 337 * those work, but also this kind:
338 *
339 * │1159e8b: ↓ jne c469be <cpp_named_operator2name@@Base+0xa72>
340 *
 341 * I.e. jumps to another function, outside _cpp_lex_token, which
 342 * are not handled correctly and, as a side effect, generate references
 343 * to ab->offsets[] entries that are set to NULL, so to make this code
 344 * more robust, check for that here.
345 *
 346 * A proper fix will be put in place, looking at the function
347 * name right after the '<' token and probably treating this like a
348 * 'call' instruction.
349 */
330 target = ab->offsets[cursor->ops.target.offset]; 350 target = ab->offsets[cursor->ops.target.offset];
351 if (target == NULL) {
352 ui_helpline__printf("WARN: jump target inconsistency, press 'o', ab->offsets[%#x] = NULL\n",
353 cursor->ops.target.offset);
354 return;
355 }
331 356
332 bcursor = browser_line(&cursor->al); 357 bcursor = browser_line(&cursor->al);
333 btarget = browser_line(target); 358 btarget = browser_line(target);
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 9faf3b5367db..6470ea2aa25e 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -60,6 +60,12 @@
60#include "sane_ctype.h" 60#include "sane_ctype.h"
61#include "symbol/kallsyms.h" 61#include "symbol/kallsyms.h"
62 62
63static bool auxtrace__dont_decode(struct perf_session *session)
64{
65 return !session->itrace_synth_opts ||
66 session->itrace_synth_opts->dont_decode;
67}
68
63int auxtrace_mmap__mmap(struct auxtrace_mmap *mm, 69int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
64 struct auxtrace_mmap_params *mp, 70 struct auxtrace_mmap_params *mp,
65 void *userpg, int fd) 71 void *userpg, int fd)
@@ -762,6 +768,9 @@ int auxtrace_queues__process_index(struct auxtrace_queues *queues,
762 size_t i; 768 size_t i;
763 int err; 769 int err;
764 770
771 if (auxtrace__dont_decode(session))
772 return 0;
773
765 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) { 774 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
766 for (i = 0; i < auxtrace_index->nr; i++) { 775 for (i = 0; i < auxtrace_index->nr; i++) {
767 ent = &auxtrace_index->entries[i]; 776 ent = &auxtrace_index->entries[i];
@@ -892,12 +901,6 @@ out_free:
892 return err; 901 return err;
893} 902}
894 903
895static bool auxtrace__dont_decode(struct perf_session *session)
896{
897 return !session->itrace_synth_opts ||
898 session->itrace_synth_opts->dont_decode;
899}
900
901int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused, 904int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
902 union perf_event *event, 905 union perf_event *event,
903 struct perf_session *session) 906 struct perf_session *session)
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 1e97937b03a9..6f09e4962dad 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -137,6 +137,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
137 struct perf_evsel *evsel; 137 struct perf_evsel *evsel;
138 bool use_sample_identifier = false; 138 bool use_sample_identifier = false;
139 bool use_comm_exec; 139 bool use_comm_exec;
140 bool sample_id = opts->sample_id;
140 141
141 /* 142 /*
142 * Set the evsel leader links before we configure attributes, 143 * Set the evsel leader links before we configure attributes,
@@ -163,8 +164,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
163 * match the id. 164 * match the id.
164 */ 165 */
165 use_sample_identifier = perf_can_sample_identifier(); 166 use_sample_identifier = perf_can_sample_identifier();
166 evlist__for_each_entry(evlist, evsel) 167 sample_id = true;
167 perf_evsel__set_sample_id(evsel, use_sample_identifier);
168 } else if (evlist->nr_entries > 1) { 168 } else if (evlist->nr_entries > 1) {
169 struct perf_evsel *first = perf_evlist__first(evlist); 169 struct perf_evsel *first = perf_evlist__first(evlist);
170 170
@@ -174,6 +174,10 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
174 use_sample_identifier = perf_can_sample_identifier(); 174 use_sample_identifier = perf_can_sample_identifier();
175 break; 175 break;
176 } 176 }
177 sample_id = true;
178 }
179
180 if (sample_id) {
177 evlist__for_each_entry(evlist, evsel) 181 evlist__for_each_entry(evlist, evsel)
178 perf_evsel__set_sample_id(evsel, use_sample_identifier); 182 perf_evsel__set_sample_id(evsel, use_sample_identifier);
179 } 183 }
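Taken together, the builtin-record.c and record.c hunks centralise sample-ID allocation: perf_evlist__config() now collects every reason for needing IDs (forced by the pipe + single-event case above, or implied by recording more than one event) into one local flag and applies it in a single place. Condensed from the hunks:

/* Condensed decision from perf_evlist__config(): attach sample IDs when
 * the caller forced them or when several events must be told apart. */
bool sample_id = opts->sample_id;	/* set for pipe + single event */

if (evlist->nr_entries > 1)
	sample_id = true;		/* IDs disambiguate the events */

if (sample_id) {
	evlist__for_each_entry(evlist, evsel)
		perf_evsel__set_sample_id(evsel, use_sample_identifier);
}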
diff --git a/tools/perf/util/trigger.h b/tools/perf/util/trigger.h
index 370138e7e35c..88223bc7c82b 100644
--- a/tools/perf/util/trigger.h
+++ b/tools/perf/util/trigger.h
@@ -12,7 +12,7 @@
12 * States and transits: 12 * States and transits:
13 * 13 *
14 * 14 *
15 * OFF--(on)--> READY --(hit)--> HIT 15 * OFF--> ON --> READY --(hit)--> HIT
16 * ^ | 16 * ^ |
17 * | (ready) 17 * | (ready)
18 * | | 18 * | |
@@ -27,8 +27,9 @@ struct trigger {
27 volatile enum { 27 volatile enum {
28 TRIGGER_ERROR = -2, 28 TRIGGER_ERROR = -2,
29 TRIGGER_OFF = -1, 29 TRIGGER_OFF = -1,
30 TRIGGER_READY = 0, 30 TRIGGER_ON = 0,
31 TRIGGER_HIT = 1, 31 TRIGGER_READY = 1,
32 TRIGGER_HIT = 2,
32 } state; 33 } state;
33 const char *name; 34 const char *name;
34}; 35};
@@ -50,7 +51,7 @@ static inline bool trigger_is_error(struct trigger *t)
50static inline void trigger_on(struct trigger *t) 51static inline void trigger_on(struct trigger *t)
51{ 52{
52 TRIGGER_WARN_ONCE(t, TRIGGER_OFF); 53 TRIGGER_WARN_ONCE(t, TRIGGER_OFF);
53 t->state = TRIGGER_READY; 54 t->state = TRIGGER_ON;
54} 55}
55 56
56static inline void trigger_ready(struct trigger *t) 57static inline void trigger_ready(struct trigger *t)
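With TRIGGER_ON now separate from TRIGGER_READY, arming a trigger no longer makes it immediately hittable; callers go OFF -> ON at setup and only later READY. A usage sketch under that model (DEFINE_TRIGGER() and trigger_hit() exist alongside the helpers shown in the hunk; the trigger name here is illustrative):

static DEFINE_TRIGGER(snapshot_trigger);	/* starts in TRIGGER_OFF */

static void trigger_sequence_sketch(void)
{
	trigger_on(&snapshot_trigger);		/* OFF   -> ON    */
	trigger_ready(&snapshot_trigger);	/* ON    -> READY */
	trigger_hit(&snapshot_trigger);		/* READY -> HIT   */
}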
diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c
index 35ade7406dcd..3ae77ba93208 100644
--- a/tools/testing/selftests/powerpc/mm/subpage_prot.c
+++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c
@@ -135,6 +135,16 @@ static int run_test(void *addr, unsigned long size)
135 return 0; 135 return 0;
136} 136}
137 137
138static int syscall_available(void)
139{
140 int rc;
141
142 errno = 0;
143 rc = syscall(__NR_subpage_prot, 0, 0, 0);
144
145 return rc == 0 || (errno != ENOENT && errno != ENOSYS);
146}
147
138int test_anon(void) 148int test_anon(void)
139{ 149{
140 unsigned long align; 150 unsigned long align;
@@ -145,6 +155,8 @@ int test_anon(void)
145 void *mallocblock; 155 void *mallocblock;
146 unsigned long mallocsize; 156 unsigned long mallocsize;
147 157
158 SKIP_IF(!syscall_available());
159
148 if (getpagesize() != 0x10000) { 160 if (getpagesize() != 0x10000) {
149 fprintf(stderr, "Kernel page size must be 64K!\n"); 161 fprintf(stderr, "Kernel page size must be 64K!\n");
150 return 1; 162 return 1;
@@ -180,6 +192,8 @@ int test_file(void)
180 off_t filesize; 192 off_t filesize;
181 int fd; 193 int fd;
182 194
195 SKIP_IF(!syscall_available());
196
183 fd = open(file_name, O_RDWR); 197 fd = open(file_name, O_RDWR);
184 if (fd == -1) { 198 if (fd == -1) {
185 perror("failed to open file"); 199 perror("failed to open file");
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index a23453943ad2..5c72ff978f27 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -16,7 +16,7 @@ $(OUTPUT)/tm-syscall: tm-syscall-asm.S
16$(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include 16$(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include
17$(OUTPUT)/tm-tmspr: CFLAGS += -pthread 17$(OUTPUT)/tm-tmspr: CFLAGS += -pthread
18$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64 18$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
19$(OUTPUT)/tm-resched-dscr: ../pmu/lib.o 19$(OUTPUT)/tm-resched-dscr: ../pmu/lib.c
20$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx 20$(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -mvsx
21$(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64 21$(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64
22 22
diff --git a/tools/testing/selftests/powerpc/tm/tm-trap.c b/tools/testing/selftests/powerpc/tm/tm-trap.c
index 5d92c23ee6cb..179d592f0073 100644
--- a/tools/testing/selftests/powerpc/tm/tm-trap.c
+++ b/tools/testing/selftests/powerpc/tm/tm-trap.c
@@ -255,6 +255,8 @@ int tm_trap_test(void)
255 255
256 struct sigaction trap_sa; 256 struct sigaction trap_sa;
257 257
258 SKIP_IF(!have_htm());
259
258 trap_sa.sa_flags = SA_SIGINFO; 260 trap_sa.sa_flags = SA_SIGINFO;
259 trap_sa.sa_sigaction = trap_signal_handler; 261 trap_sa.sa_sigaction = trap_signal_handler;
260 sigaction(SIGTRAP, &trap_sa, NULL); 262 sigaction(SIGTRAP, &trap_sa, NULL);
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index d2561895a021..22d564673830 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -2,25 +2,33 @@
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3#please run as root 3#please run as root
4 4
5#we need 256M, below is the size in kB
6needmem=262144
7mnt=./huge 5mnt=./huge
8exitcode=0 6exitcode=0
9 7
10#get pagesize and freepages from /proc/meminfo 8#get huge pagesize and freepages from /proc/meminfo
11while read name size unit; do 9while read name size unit; do
12 if [ "$name" = "HugePages_Free:" ]; then 10 if [ "$name" = "HugePages_Free:" ]; then
13 freepgs=$size 11 freepgs=$size
14 fi 12 fi
15 if [ "$name" = "Hugepagesize:" ]; then 13 if [ "$name" = "Hugepagesize:" ]; then
16 pgsize=$size 14 hpgsize_KB=$size
17 fi 15 fi
18done < /proc/meminfo 16done < /proc/meminfo
19 17
18# Simple hugetlbfs tests have a hardcoded minimum requirement of
19# huge pages totaling 256MB (262144KB) in size. The userfaultfd
20# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
21# both of these requirements into account and attempt to increase
 22# the number of huge pages available.
23nr_cpus=$(nproc)
24hpgsize_MB=$((hpgsize_KB / 1024))
25half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
26needmem_KB=$((half_ufd_size_MB * 2 * 1024))
27
20#set proper nr_hugepages 28#set proper nr_hugepages
21if [ -n "$freepgs" ] && [ -n "$pgsize" ]; then 29if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
22 nr_hugepgs=`cat /proc/sys/vm/nr_hugepages` 30 nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
23 needpgs=`expr $needmem / $pgsize` 31 needpgs=$((needmem_KB / hpgsize_KB))
24 tries=2 32 tries=2
25 while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do 33 while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
26 lackpgs=$(( $needpgs - $freepgs )) 34 lackpgs=$(( $needpgs - $freepgs ))
@@ -107,8 +115,9 @@ fi
107echo "---------------------------" 115echo "---------------------------"
108echo "running userfaultfd_hugetlb" 116echo "running userfaultfd_hugetlb"
109echo "---------------------------" 117echo "---------------------------"
110# 256MB total huge pages == 128MB src and 128MB dst 118# Test requires source and destination huge pages. Size of source
 111./userfaultfd hugetlb 128 32 $mnt/ufd_test_file 119# (half_ufd_size_MB) is passed as an argument to the test.
120./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
112if [ $? -ne 0 ]; then 121if [ $? -ne 0 ]; then
113 echo "[FAIL]" 122 echo "[FAIL]"
114 exitcode=1 123 exitcode=1
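The sizing math above replaces the fixed 256MB reservation with one that also covers the userfaultfd hugetlb test's 2 * nr_cpus pages. Two worked examples, assuming 2MB huge pages:

/* half_ufd_size_MB = ceil(nr_cpus * hpgsize_MB / 128) * 128
 * needmem_KB       = half_ufd_size_MB * 2 * 1024
 *
 *   nr_cpus = 4:   4 * 2 =   8 MB -> rounds up to 128 -> needmem 262144 KB
 *                  (the old fixed 256 MB remains the floor)
 *   nr_cpus = 96: 96 * 2 = 192 MB -> rounds up to 256 -> needmem 524288 KB
 */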
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index 361466a2eaef..ade443a88421 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -95,6 +95,10 @@ asm (
95 "int3\n\t" 95 "int3\n\t"
96 "vmcode_int80:\n\t" 96 "vmcode_int80:\n\t"
97 "int $0x80\n\t" 97 "int $0x80\n\t"
98 "vmcode_popf_hlt:\n\t"
99 "push %ax\n\t"
100 "popf\n\t"
101 "hlt\n\t"
98 "vmcode_umip:\n\t" 102 "vmcode_umip:\n\t"
99 /* addressing via displacements */ 103 /* addressing via displacements */
100 "smsw (2052)\n\t" 104 "smsw (2052)\n\t"
@@ -124,8 +128,8 @@ asm (
124 128
125extern unsigned char vmcode[], end_vmcode[]; 129extern unsigned char vmcode[], end_vmcode[];
126extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[], 130extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[],
127 vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_umip[], 131 vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[],
128 vmcode_umip_str[], vmcode_umip_sldt[]; 132 vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[];
129 133
130/* Returns false if the test was skipped. */ 134/* Returns false if the test was skipped. */
131static bool do_test(struct vm86plus_struct *v86, unsigned long eip, 135static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
@@ -175,7 +179,7 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
175 (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) { 179 (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) {
176 printf("[OK]\tReturned correctly\n"); 180 printf("[OK]\tReturned correctly\n");
177 } else { 181 } else {
178 printf("[FAIL]\tIncorrect return reason\n"); 182 printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip);
179 nerrs++; 183 nerrs++;
180 } 184 }
181 185
@@ -264,6 +268,9 @@ int main(void)
264 v86.regs.ds = load_addr / 16; 268 v86.regs.ds = load_addr / 16;
265 v86.regs.es = load_addr / 16; 269 v86.regs.es = load_addr / 16;
266 270
271 /* Use the end of the page as our stack. */
272 v86.regs.esp = 4096;
273
267 assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */ 274 assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */
268 275
269 /* #BR -- should deliver SIG??? */ 276 /* #BR -- should deliver SIG??? */
@@ -295,6 +302,23 @@ int main(void)
295 v86.regs.eflags &= ~X86_EFLAGS_IF; 302 v86.regs.eflags &= ~X86_EFLAGS_IF;
296 do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set"); 303 do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set");
297 304
305 /* POPF with VIP set but IF clear: should not trap */
306 v86.regs.eflags = X86_EFLAGS_VIP;
307 v86.regs.eax = 0;
308 do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear");
309
310 /* POPF with VIP set and IF set: should trap */
311 v86.regs.eflags = X86_EFLAGS_VIP;
312 v86.regs.eax = X86_EFLAGS_IF;
313 do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set");
314
315 /* POPF with VIP clear and IF set: should not trap */
316 v86.regs.eflags = 0;
317 v86.regs.eax = X86_EFLAGS_IF;
318 do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set");
319
320 v86.regs.eflags = 0;
321
298 /* INT3 -- should cause #BP */ 322 /* INT3 -- should cause #BP */
299 do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3"); 323 do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3");
300 324
@@ -318,7 +342,7 @@ int main(void)
318 clearhandler(SIGSEGV); 342 clearhandler(SIGSEGV);
319 343
320 /* Make sure nothing explodes if we fork. */ 344 /* Make sure nothing explodes if we fork. */
321 if (fork() > 0) 345 if (fork() == 0)
322 return 0; 346 return 0;
323 347
324 return (nerrs == 0 ? 0 : 1); 348 return (nerrs == 0 ? 0 : 1);
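The three new POPF cases pivot on how the vmcode_popf_hlt stub works: it pushes AX and pops it into FLAGS before halting, so v86.regs.eax chooses the IF value the guest tries to load while v86.regs.eflags carries the monitor's VIP. Only the combination of VIP set and IF loaded must bounce back with VM86_STI; the shape of that probe, copied from the hunk:

/* The interesting case: the guest sets IF while the monitor has VIP set,
 * so the emulated POPF must trap out with VM86_STI. */
v86.regs.eflags = X86_EFLAGS_VIP;	/* monitor has an event pending */
v86.regs.eax    = X86_EFLAGS_IF;	/* POPF loads IF in the guest   */
do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0,
	"POPF with VIP and IF set");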
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index be81621446f0..0b4f1cc2291c 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -450,7 +450,7 @@ static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
450 num_vsyscall_traps++; 450 num_vsyscall_traps++;
451} 451}
452 452
453static int test_native_vsyscall(void) 453static int test_emulation(void)
454{ 454{
455 time_t tmp; 455 time_t tmp;
456 bool is_native; 456 bool is_native;
@@ -458,7 +458,7 @@ static int test_native_vsyscall(void)
458 if (!vtime) 458 if (!vtime)
459 return 0; 459 return 0;
460 460
461 printf("[RUN]\tchecking for native vsyscall\n"); 461 printf("[RUN]\tchecking that vsyscalls are emulated\n");
462 sethandler(SIGTRAP, sigtrap, 0); 462 sethandler(SIGTRAP, sigtrap, 0);
463 set_eflags(get_eflags() | X86_EFLAGS_TF); 463 set_eflags(get_eflags() | X86_EFLAGS_TF);
464 vtime(&tmp); 464 vtime(&tmp);
@@ -474,11 +474,12 @@ static int test_native_vsyscall(void)
474 */ 474 */
475 is_native = (num_vsyscall_traps > 1); 475 is_native = (num_vsyscall_traps > 1);
476 476
477 printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n", 477 printf("[%s]\tvsyscalls are %s (%d instructions in vsyscall page)\n",
478 (is_native ? "FAIL" : "OK"),
478 (is_native ? "native" : "emulated"), 479 (is_native ? "native" : "emulated"),
479 (int)num_vsyscall_traps); 480 (int)num_vsyscall_traps);
480 481
481 return 0; 482 return is_native;
482} 483}
483#endif 484#endif
484 485
@@ -498,7 +499,7 @@ int main(int argc, char **argv)
498 nerrs += test_vsys_r(); 499 nerrs += test_vsys_r();
499 500
500#ifdef __x86_64__ 501#ifdef __x86_64__
501 nerrs += test_native_vsyscall(); 502 nerrs += test_emulation();
502#endif 503#endif
503 504
504 return nerrs ? 1 : 0; 505 return nerrs ? 1 : 0;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 70f4c30918eb..282389eb204f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -581,6 +581,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
581 581
582int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) 582int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
583{ 583{
584 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
584 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); 585 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
585 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); 586 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
586 587
@@ -594,6 +595,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
594 ptimer->cnt_ctl = 0; 595 ptimer->cnt_ctl = 0;
595 kvm_timer_update_state(vcpu); 596 kvm_timer_update_state(vcpu);
596 597
598 if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
599 kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
600
597 return 0; 601 return 0;
598} 602}
599 603
@@ -767,7 +771,7 @@ int kvm_timer_hyp_init(bool has_gic)
767 static_branch_enable(&has_gic_active_state); 771 static_branch_enable(&has_gic_active_state);
768 } 772 }
769 773
770 kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); 774 kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
771 775
772 cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, 776 cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
773 "kvm/arm/timer:starting", kvm_timer_starting_cpu, 777 "kvm/arm/timer:starting", kvm_timer_starting_cpu,
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 86941f6181bb..53572304843b 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -384,14 +384,11 @@ static void vcpu_power_off(struct kvm_vcpu *vcpu)
384int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 384int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
385 struct kvm_mp_state *mp_state) 385 struct kvm_mp_state *mp_state)
386{ 386{
387 vcpu_load(vcpu);
388
389 if (vcpu->arch.power_off) 387 if (vcpu->arch.power_off)
390 mp_state->mp_state = KVM_MP_STATE_STOPPED; 388 mp_state->mp_state = KVM_MP_STATE_STOPPED;
391 else 389 else
392 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; 390 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
393 391
394 vcpu_put(vcpu);
395 return 0; 392 return 0;
396} 393}
397 394
@@ -400,8 +397,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
400{ 397{
401 int ret = 0; 398 int ret = 0;
402 399
403 vcpu_load(vcpu);
404
405 switch (mp_state->mp_state) { 400 switch (mp_state->mp_state) {
406 case KVM_MP_STATE_RUNNABLE: 401 case KVM_MP_STATE_RUNNABLE:
407 vcpu->arch.power_off = false; 402 vcpu->arch.power_off = false;
@@ -413,7 +408,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
413 ret = -EINVAL; 408 ret = -EINVAL;
414 } 409 }
415 410
416 vcpu_put(vcpu);
417 return ret; 411 return ret;
418} 412}
419 413
@@ -1036,8 +1030,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1036 struct kvm_device_attr attr; 1030 struct kvm_device_attr attr;
1037 long r; 1031 long r;
1038 1032
1039 vcpu_load(vcpu);
1040
1041 switch (ioctl) { 1033 switch (ioctl) {
1042 case KVM_ARM_VCPU_INIT: { 1034 case KVM_ARM_VCPU_INIT: {
1043 struct kvm_vcpu_init init; 1035 struct kvm_vcpu_init init;
@@ -1114,7 +1106,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1114 r = -EINVAL; 1106 r = -EINVAL;
1115 } 1107 }
1116 1108
1117 vcpu_put(vcpu);
1118 return r; 1109 return r;
1119} 1110}
1120 1111
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index f5c3d6d7019e..b89ce5432214 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -215,7 +215,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
215 * are now visible to the system register interface. 215 * are now visible to the system register interface.
216 */ 216 */
217 if (!cpu_if->vgic_sre) { 217 if (!cpu_if->vgic_sre) {
218 dsb(st); 218 dsb(sy);
219 isb();
219 cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); 220 cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
220 } 221 }
221 222
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ec62d1cccab7..b960acdd0c05 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1810,9 +1810,9 @@ int kvm_mmu_init(void)
1810 */ 1810 */
1811 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); 1811 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
1812 1812
1813 kvm_info("IDMAP page: %lx\n", hyp_idmap_start); 1813 kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
1814 kvm_info("HYP VA range: %lx:%lx\n", 1814 kvm_debug("HYP VA range: %lx:%lx\n",
1815 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL)); 1815 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
1816 1816
1817 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) && 1817 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
1818 hyp_idmap_start < kern_hyp_va(~0UL) && 1818 hyp_idmap_start < kern_hyp_va(~0UL) &&
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 83d82bd7dc4e..dbe99d635c80 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -113,9 +113,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
113 /* Loop over all IRQs affected by this read */ 113 /* Loop over all IRQs affected by this read */
114 for (i = 0; i < len * 8; i++) { 114 for (i = 0; i < len * 8; i++) {
115 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 115 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
116 unsigned long flags;
116 117
118 spin_lock_irqsave(&irq->irq_lock, flags);
117 if (irq_is_pending(irq)) 119 if (irq_is_pending(irq))
118 value |= (1U << i); 120 value |= (1U << i);
121 spin_unlock_irqrestore(&irq->irq_lock, flags);
119 122
120 vgic_put_irq(vcpu->kvm, irq); 123 vgic_put_irq(vcpu->kvm, irq);
121 } 124 }
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index c32d7b93ffd1..29556f71b691 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -37,6 +37,13 @@ void vgic_v2_init_lrs(void)
37 vgic_v2_write_lr(i, 0); 37 vgic_v2_write_lr(i, 0);
38} 38}
39 39
40void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
41{
42 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
43
44 cpuif->vgic_hcr |= GICH_HCR_NPIE;
45}
46
40void vgic_v2_set_underflow(struct kvm_vcpu *vcpu) 47void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
41{ 48{
42 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; 49 struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -64,7 +71,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
64 int lr; 71 int lr;
65 unsigned long flags; 72 unsigned long flags;
66 73
67 cpuif->vgic_hcr &= ~GICH_HCR_UIE; 74 cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
68 75
69 for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { 76 for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
70 u32 val = cpuif->vgic_lr[lr]; 77 u32 val = cpuif->vgic_lr[lr];
@@ -410,7 +417,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
410 kvm_vgic_global_state.type = VGIC_V2; 417 kvm_vgic_global_state.type = VGIC_V2;
411 kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS; 418 kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
412 419
413 kvm_info("vgic-v2@%llx\n", info->vctrl.start); 420 kvm_debug("vgic-v2@%llx\n", info->vctrl.start);
414 421
415 return 0; 422 return 0;
416out: 423out:
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 6b329414e57a..0ff2006f3781 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -26,6 +26,13 @@ static bool group1_trap;
26static bool common_trap; 26static bool common_trap;
27static bool gicv4_enable; 27static bool gicv4_enable;
28 28
29void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
30{
31 struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
32
33 cpuif->vgic_hcr |= ICH_HCR_NPIE;
34}
35
29void vgic_v3_set_underflow(struct kvm_vcpu *vcpu) 36void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
30{ 37{
31 struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3; 38 struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -47,7 +54,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
47 int lr; 54 int lr;
48 unsigned long flags; 55 unsigned long flags;
49 56
50 cpuif->vgic_hcr &= ~ICH_HCR_UIE; 57 cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
51 58
52 for (lr = 0; lr < vgic_cpu->used_lrs; lr++) { 59 for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
53 u64 val = cpuif->vgic_lr[lr]; 60 u64 val = cpuif->vgic_lr[lr];
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index c7c5ef190afa..8201899126f6 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -495,6 +495,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
495 return ret; 495 return ret;
496} 496}
497 497
498/**
499 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
500 * @vcpu: The VCPU pointer
501 * @vintid: The INTID of the interrupt
502 *
503 * Reset the active and pending states of a mapped interrupt. Kernel
504 * subsystems injecting mapped interrupts should reset their interrupt lines
505 * when we are doing a reset of the VM.
506 */
507void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
508{
509 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
510 unsigned long flags;
511
512 if (!irq->hw)
513 goto out;
514
515 spin_lock_irqsave(&irq->irq_lock, flags);
516 irq->active = false;
517 irq->pending_latch = false;
518 irq->line_level = false;
519 spin_unlock_irqrestore(&irq->irq_lock, flags);
520out:
521 vgic_put_irq(vcpu->kvm, irq);
522}
523
498int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) 524int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
499{ 525{
500 struct vgic_irq *irq; 526 struct vgic_irq *irq;
@@ -684,22 +710,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
684 vgic_v3_set_underflow(vcpu); 710 vgic_v3_set_underflow(vcpu);
685} 711}
686 712
713static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
714{
715 if (kvm_vgic_global_state.type == VGIC_V2)
716 vgic_v2_set_npie(vcpu);
717 else
718 vgic_v3_set_npie(vcpu);
719}
720
687/* Requires the ap_list_lock to be held. */ 721/* Requires the ap_list_lock to be held. */
688static int compute_ap_list_depth(struct kvm_vcpu *vcpu) 722static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
723 bool *multi_sgi)
689{ 724{
690 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 725 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
691 struct vgic_irq *irq; 726 struct vgic_irq *irq;
692 int count = 0; 727 int count = 0;
693 728
729 *multi_sgi = false;
730
694 DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); 731 DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
695 732
696 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 733 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
697 spin_lock(&irq->irq_lock); 734 spin_lock(&irq->irq_lock);
698 /* GICv2 SGIs can count for more than one... */ 735 /* GICv2 SGIs can count for more than one... */
699 if (vgic_irq_is_sgi(irq->intid) && irq->source) 736 if (vgic_irq_is_sgi(irq->intid) && irq->source) {
700 count += hweight8(irq->source); 737 int w = hweight8(irq->source);
701 else 738
739 count += w;
740 *multi_sgi |= (w > 1);
741 } else {
702 count++; 742 count++;
743 }
703 spin_unlock(&irq->irq_lock); 744 spin_unlock(&irq->irq_lock);
704 } 745 }
705 return count; 746 return count;
@@ -710,28 +751,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
710{ 751{
711 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 752 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
712 struct vgic_irq *irq; 753 struct vgic_irq *irq;
713 int count = 0; 754 int count;
755 bool npie = false;
756 bool multi_sgi;
757 u8 prio = 0xff;
714 758
715 DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); 759 DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
716 760
717 if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) 761 count = compute_ap_list_depth(vcpu, &multi_sgi);
762 if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
718 vgic_sort_ap_list(vcpu); 763 vgic_sort_ap_list(vcpu);
719 764
765 count = 0;
766
720 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 767 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
721 spin_lock(&irq->irq_lock); 768 spin_lock(&irq->irq_lock);
722 769
723 if (unlikely(vgic_target_oracle(irq) != vcpu))
724 goto next;
725
726 /* 770 /*
727 * If we get an SGI with multiple sources, try to get 771 * If we have multi-SGIs in the pipeline, we need to
728 * them in all at once. 772 * guarantee that they are all seen before any IRQ of
773 * lower priority. In that case, we need to filter out
774 * these interrupts by exiting early. This is easy as
775 * the AP list has been sorted already.
729 */ 776 */
730 do { 777 if (multi_sgi && irq->priority > prio) {
778 spin_unlock(&irq->irq_lock);
779 break;
780 }
781
782 if (likely(vgic_target_oracle(irq) == vcpu)) {
731 vgic_populate_lr(vcpu, irq, count++); 783 vgic_populate_lr(vcpu, irq, count++);
732 } while (irq->source && count < kvm_vgic_global_state.nr_lr);
733 784
734next: 785 if (irq->source) {
786 npie = true;
787 prio = irq->priority;
788 }
789 }
790
735 spin_unlock(&irq->irq_lock); 791 spin_unlock(&irq->irq_lock);
736 792
737 if (count == kvm_vgic_global_state.nr_lr) { 793 if (count == kvm_vgic_global_state.nr_lr) {
@@ -742,6 +798,9 @@ next:
742 } 798 }
743 } 799 }
744 800
801 if (npie)
802 vgic_set_npie(vcpu);
803
745 vcpu->arch.vgic_cpu.used_lrs = count; 804 vcpu->arch.vgic_cpu.used_lrs = count;
746 805
747 /* Nuke remaining LRs */ 806 /* Nuke remaining LRs */
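The vgic_flush_lr_state() rework replaces the old "drain one SGI into consecutive LRs" loop with a priority cut-off: once a multi-source SGI cannot be fully injected, nothing of lower priority may be placed in an LR ahead of its remaining sources, and NPIE re-enters the exit path when an LR frees up. A condensed sketch of that policy (locking and the LR-overflow handling from the hunk are omitted):

/* Condensed from vgic_flush_lr_state(): fill LRs in (sorted) priority
 * order, but never let anything of lower priority overtake the pending
 * sources of a multi-source SGI; NPIE brings us back to refill. */
static int vgic_fill_lrs_sketch(struct kvm_vcpu *vcpu, bool multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool npie = false;
	u8 prio = 0xff;
	int count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		if (multi_sgi && irq->priority > prio)
			break;			/* keep SGI ordering     */

		if (vgic_target_oracle(irq) == vcpu) {
			vgic_populate_lr(vcpu, irq, count++);
			if (irq->source) {	/* SGI sources left over */
				npie = true;
				prio = irq->priority;
			}
		}

		if (count == kvm_vgic_global_state.nr_lr)
			break;			/* out of list registers */
	}

	if (npie)
		vgic_set_npie(vcpu);

	return count;
}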
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 12c37b89f7a3..f5b8519e5546 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -96,6 +96,7 @@
96/* we only support 64 kB translation table page size */ 96/* we only support 64 kB translation table page size */
97#define KVM_ITS_L1E_ADDR_MASK GENMASK_ULL(51, 16) 97#define KVM_ITS_L1E_ADDR_MASK GENMASK_ULL(51, 16)
98 98
99/* Requires the irq_lock to be held by the caller. */
99static inline bool irq_is_pending(struct vgic_irq *irq) 100static inline bool irq_is_pending(struct vgic_irq *irq)
100{ 101{
101 if (irq->config == VGIC_CONFIG_EDGE) 102 if (irq->config == VGIC_CONFIG_EDGE)
@@ -159,6 +160,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
159void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); 160void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
160void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr); 161void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
161void vgic_v2_set_underflow(struct kvm_vcpu *vcpu); 162void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
163void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
162int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr); 164int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
163int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write, 165int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
164 int offset, u32 *val); 166 int offset, u32 *val);
@@ -188,6 +190,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
188void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr); 190void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
189void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr); 191void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
190void vgic_v3_set_underflow(struct kvm_vcpu *vcpu); 192void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
193void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
191void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 194void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
192void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 195void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
193void vgic_v3_enable(struct kvm_vcpu *vcpu); 196void vgic_v3_enable(struct kvm_vcpu *vcpu);