-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/net/mediatek-net.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/phy.txt | 5
-rw-r--r--  Documentation/filesystems/proc.txt | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-func-close.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-func-ioctl.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-func-open.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-func-poll.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-intro.rst | 17
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-dqevent.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-g-mode.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-receive.rst | 5
-rw-r--r--  Documentation/power/states.txt | 4
-rw-r--r--  MAINTAINERS | 26
-rw-r--r--  Makefile | 6
-rw-r--r--  arch/arc/include/asm/delay.h | 4
-rw-r--r--  arch/arc/kernel/head.S | 14
-rw-r--r--  arch/arc/kernel/mcip.c | 55
-rw-r--r--  arch/arc/kernel/smp.c | 25
-rw-r--r--  arch/arc/kernel/unaligned.c | 3
-rw-r--r--  arch/arm64/crypto/aes-modes.S | 88
-rw-r--r--  arch/arm64/kernel/topology.c | 8
-rw-r--r--  arch/frv/include/asm/atomic.h | 35
-rw-r--r--  arch/mn10300/include/asm/switch_to.h | 2
-rw-r--r--  arch/parisc/include/asm/bitops.h | 8
-rw-r--r--  arch/parisc/include/uapi/asm/bitsperlong.h | 2
-rw-r--r--  arch/parisc/include/uapi/asm/swab.h | 5
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/include/asm/cpu_has_feature.h | 2
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 2
-rw-r--r--  arch/powerpc/include/asm/module.h | 4
-rw-r--r--  arch/powerpc/include/asm/stackprotector.h | 40
-rw-r--r--  arch/powerpc/kernel/Makefile | 4
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 2
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 6
-rw-r--r--  arch/powerpc/kernel/module_64.c | 8
-rw-r--r--  arch/powerpc/kernel/process.c | 6
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 3
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c | 4
-rw-r--r--  arch/s390/kernel/ptrace.c | 8
-rw-r--r--  arch/s390/mm/pgtable.c | 7
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h | 8
-rw-r--r--  arch/sparc/kernel/irq_64.c | 2
-rw-r--r--  arch/sparc/kernel/sstate.c | 6
-rw-r--r--  arch/sparc/kernel/traps_64.c | 73
-rw-r--r--  arch/tile/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 8
-rw-r--r--  arch/x86/events/intel/rapl.c | 60
-rw-r--r--  arch/x86/events/intel/uncore.c | 232
-rw-r--r--  arch/x86/include/asm/microcode.h | 1
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 31
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 5
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 22
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 9
-rw-r--r--  arch/x86/kernel/fpu/core.c | 4
-rw-r--r--  arch/x86/kernel/hpet.c | 1
-rw-r--r--  arch/x86/kvm/x86.c | 1
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 16
-rw-r--r--  arch/xtensa/kernel/setup.c | 2
-rw-r--r--  crypto/algapi.c | 1
-rw-r--r--  crypto/algif_aead.c | 2
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 9
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 17
-rw-r--r--  drivers/acpi/sleep.c | 8
-rw-r--r--  drivers/acpi/video_detect.c | 11
-rw-r--r--  drivers/ata/libata-core.c | 6
-rw-r--r--  drivers/ata/sata_mv.c | 3
-rw-r--r--  drivers/base/firmware_class.c | 5
-rw-r--r--  drivers/base/memory.c | 12
-rw-r--r--  drivers/base/power/runtime.c | 11
-rw-r--r--  drivers/bcma/bcma_private.h | 3
-rw-r--r--  drivers/bcma/driver_chipcommon.c | 11
-rw-r--r--  drivers/bcma/driver_mips.c | 3
-rw-r--r--  drivers/block/xen-blkfront.c | 22
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 17
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 44
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 1
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 6
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 53
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 18
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 3
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 1
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 4
-rw-r--r--  drivers/dma/cppi41.c | 69
-rw-r--r--  drivers/dma/pl330.c | 19
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 14
-rw-r--r--  drivers/gpio/gpiolib.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 27
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 24
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 157
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 18
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 7
-rw-r--r--  drivers/gpu/drm/cirrus/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 25
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 9
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 23
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 7
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 12
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 74
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 66
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 81
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 103
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 84
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 162
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 172
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 5
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 22
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_usif.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 12
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_render_cl.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fb.c | 2
-rw-r--r--  drivers/hid/hid-cp2112.c | 28
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/hid-lg.c | 2
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/wacom_wac.c | 28
-rw-r--r--  drivers/hv/ring_buffer.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-imx-lpi2c.c | 20
-rw-r--r--  drivers/iio/adc/palmas_gpadc.c | 4
-rw-r--r--  drivers/iio/health/afe4403.c | 4
-rw-r--r--  drivers/iio/health/afe4404.c | 4
-rw-r--r--  drivers/iio/health/max30100.c | 2
-rw-r--r--  drivers/iio/humidity/dht11.c | 6
-rw-r--r--  drivers/infiniband/core/cma.c | 3
-rw-r--r--  drivers/infiniband/core/umem.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 11
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 21
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 9
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 24
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 33
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 147
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 11
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 12
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 23
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 8
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c | 14
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 62
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 4
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 3
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 11
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 13
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 15
-rw-r--r--  drivers/input/rmi4/rmi_driver.c | 4
-rw-r--r--  drivers/input/touchscreen/wm97xx-core.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/message.c | 3
-rw-r--r--  drivers/md/dm-crypt.c | 8
-rw-r--r--  drivers/md/dm-mpath.c | 4
-rw-r--r--  drivers/md/dm-rq.c | 4
-rw-r--r--  drivers/md/md.c | 5
-rw-r--r--  drivers/md/raid5-cache.c | 106
-rw-r--r--  drivers/md/raid5.c | 121
-rw-r--r--  drivers/md/raid5.h | 7
-rw-r--r--  drivers/media/cec/cec-adap.c | 105
-rw-r--r--  drivers/media/dvb-core/dvb_net.c | 15
-rw-r--r--  drivers/media/i2c/Kconfig | 1
-rw-r--r--  drivers/media/i2c/smiapp/smiapp-core.c | 33
-rw-r--r--  drivers/media/i2c/tvp5150.c | 56
-rw-r--r--  drivers/media/i2c/tvp5150_reg.h | 9
-rw-r--r--  drivers/media/pci/cobalt/cobalt-driver.c | 8
-rw-r--r--  drivers/media/pci/cobalt/cobalt-driver.h | 2
-rw-r--r--  drivers/media/usb/dvb-usb/pctv452e.c | 133
-rw-r--r--  drivers/memstick/core/memstick.c | 2
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 7
-rw-r--r--  drivers/mmc/host/sdhci.c | 3
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 1
-rw-r--r--  drivers/net/can/ti_hecc.c | 16
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 45
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 15
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 80
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 188
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 20
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 33
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 7
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 202
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 88
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 112
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1
-rw-r--r--  drivers/net/gtp.c | 13
-rw-r--r--  drivers/net/hyperv/netvsc.c | 6
-rw-r--r--  drivers/net/macvtap.c | 2
-rw-r--r--  drivers/net/phy/bcm63xx.c | 21
-rw-r--r--  drivers/net/phy/dp83848.c | 3
-rw-r--r--  drivers/net/phy/marvell.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 14
-rw-r--r--  drivers/net/phy/phy.c | 15
-rw-r--r--  drivers/net/phy/phy_led_triggers.c | 9
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 8
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 7
-rw-r--r--  drivers/net/usb/r8152.c | 34
-rw-r--r--  drivers/net/virtio_net.c | 25
-rw-r--r--  drivers/net/vxlan.c | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 12
-rw-r--r--  drivers/net/xen-netback/interface.c | 6
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 13
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/nvme/host/fc.c | 6
-rw-r--r--  drivers/nvme/target/configfs.c | 1
-rw-r--r--  drivers/nvme/target/core.c | 15
-rw-r--r--  drivers/nvme/target/fc.c | 36
-rw-r--r--  drivers/nvme/target/nvmet.h | 1
-rw-r--r--  drivers/nvme/target/rdma.c | 17
-rw-r--r--  drivers/parport/parport_gsc.c | 8
-rw-r--r--  drivers/pci/pcie/aspm.c | 19
-rw-r--r--  drivers/pinctrl/berlin/berlin-bg4ct.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 60
-rw-r--r--  drivers/pinctrl/intel/pinctrl-broxton.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 30
-rw-r--r--  drivers/pinctrl/intel/pinctrl-merrifield.c | 3
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxbb.c | 7
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxl.c | 7
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 2
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.c | 3
-rw-r--r--  drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c | 2
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 1
-rw-r--r--  drivers/platform/x86/intel_mid_powerbtn.c | 2
-rw-r--r--  drivers/platform/x86/mlx-platform.c | 2
-rw-r--r--  drivers/platform/x86/surface3-wmi.c | 6
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 2
-rw-r--r--  drivers/regulator/fixed.c | 46
-rw-r--r--  drivers/regulator/twl6030-regulator.c | 2
-rw-r--r--  drivers/rtc/Kconfig | 5
-rw-r--r--  drivers/rtc/rtc-jz4740.c | 12
-rw-r--r--  drivers/scsi/sd.c | 17
-rw-r--r--  drivers/scsi/virtio_scsi.c | 11
-rw-r--r--  drivers/staging/greybus/timesync_platform.c | 6
-rw-r--r--  drivers/thermal/thermal_hwmon.c | 20
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 13
-rw-r--r--  drivers/usb/musb/musb_core.c | 26
-rw-r--r--  drivers/usb/musb/musb_core.h | 1
-rw-r--r--  drivers/usb/serial/option.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.h | 1
-rw-r--r--  drivers/usb/serial/qcserial.c | 1
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 15
-rw-r--r--  drivers/vhost/vhost.c | 10
-rw-r--r--  drivers/vhost/vsock.c | 13
-rw-r--r--  drivers/video/fbdev/core/fbcmap.c | 26
-rw-r--r--  drivers/virtio/virtio_mmio.c | 20
-rw-r--r--  drivers/xen/swiotlb-xen.c | 5
-rw-r--r--  fs/Kconfig | 1
-rw-r--r--  fs/block_dev.c | 6
-rw-r--r--  fs/btrfs/inode.c | 26
-rw-r--r--  fs/cifs/readdir.c | 1
-rw-r--r--  fs/dax.c | 7
-rw-r--r--  fs/ext2/Kconfig | 1
-rw-r--r--  fs/ext4/Kconfig | 1
-rw-r--r--  fs/fscache/cookie.c | 5
-rw-r--r--  fs/fscache/netfs.c | 1
-rw-r--r--  fs/fscache/object.c | 32
-rw-r--r--  fs/iomap.c | 3
-rw-r--r--  fs/nfs/nfs4proc.c | 4
-rw-r--r--  fs/nfs/nfs4state.c | 1
-rw-r--r--  fs/nfs/pnfs.c | 2
-rw-r--r--  fs/nfsd/nfs4layouts.c | 5
-rw-r--r--  fs/nfsd/nfs4state.c | 19
-rw-r--r--  fs/nfsd/state.h | 4
-rw-r--r--  fs/nfsd/vfs.c | 97
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/romfs/super.c | 23
-rw-r--r--  fs/userfaultfd.c | 37
-rw-r--r--  fs/xfs/libxfs/xfs_ag_resv.c | 70
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 6
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 48
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 6
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.c | 90
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c | 2
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 28
-rw-r--r--  fs/xfs/xfs_buf.c | 1
-rw-r--r--  fs/xfs/xfs_inode.c | 23
-rw-r--r--  fs/xfs/xfs_iomap.c | 2
-rw-r--r--  fs/xfs/xfs_mount.h | 1
-rw-r--r--  fs/xfs/xfs_qm.c | 3
-rw-r--r--  include/asm-generic/export.h | 11
-rw-r--r--  include/drm/drmP.h | 1
-rw-r--r--  include/drm/drm_atomic.h | 2
-rw-r--r--  include/drm/drm_connector.h | 16
-rw-r--r--  include/drm/drm_mode_config.h | 2
-rw-r--r--  include/linux/bpf.h | 2
-rw-r--r--  include/linux/can/core.h | 7
-rw-r--r--  include/linux/cpuhotplug.h | 3
-rw-r--r--  include/linux/export.h | 17
-rw-r--r--  include/linux/fscache-cache.h | 1
-rw-r--r--  include/linux/gpio/driver.h | 70
-rw-r--r--  include/linux/hyperv.h | 32
-rw-r--r--  include/linux/irq.h | 17
-rw-r--r--  include/linux/log2.h | 13
-rw-r--r--  include/linux/memory_hotplug.h | 7
-rw-r--r--  include/linux/micrel_phy.h | 2
-rw-r--r--  include/linux/mmzone.h | 6
-rw-r--r--  include/linux/module.h | 14
-rw-r--r--  include/linux/netdevice.h | 29
-rw-r--r--  include/linux/nfs4.h | 3
-rw-r--r--  include/linux/nmi.h | 1
-rw-r--r--  include/linux/percpu-refcount.h | 4
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  include/linux/phy_led_triggers.h | 4
-rw-r--r--  include/linux/sunrpc/clnt.h | 1
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/virtio_net.h | 6
-rw-r--r--  include/net/ipv6.h | 7
-rw-r--r--  include/net/lwtunnel.h | 13
-rw-r--r--  include/net/netfilter/nf_tables.h | 6
-rw-r--r--  include/net/netfilter/nft_fib.h | 6
-rw-r--r--  include/rdma/ib_verbs.h | 14
-rw-r--r--  include/soc/arc/mcip.h | 16
-rw-r--r--  include/uapi/linux/cec-funcs.h | 10
-rw-r--r--  include/uapi/linux/ethtool.h | 4
-rw-r--r--  include/uapi/linux/netfilter/nf_log.h | 2
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 4
-rw-r--r--  include/uapi/rdma/Kbuild | 1
-rw-r--r--  include/uapi/rdma/cxgb3-abi.h | 2
-rw-r--r--  init/Kconfig | 4
-rw-r--r--  kernel/bpf/arraymap.c | 18
-rw-r--r--  kernel/bpf/hashtab.c | 22
-rw-r--r--  kernel/bpf/stackmap.c | 20
-rw-r--r--  kernel/bpf/syscall.c | 26
-rw-r--r--  kernel/cgroup.c | 13
-rw-r--r--  kernel/events/core.c | 69
-rw-r--r--  kernel/irq/irqdomain.c | 44
-rw-r--r--  kernel/module.c | 53
-rw-r--r--  kernel/panic.c | 2
-rw-r--r--  kernel/power/suspend.c | 4
-rw-r--r--  kernel/sysctl.c | 1
-rw-r--r--  kernel/trace/trace_hwlat.c | 8
-rw-r--r--  kernel/trace/trace_kprobe.c | 2
-rw-r--r--  kernel/ucount.c | 14
-rw-r--r--  kernel/watchdog.c | 9
-rw-r--r--  kernel/watchdog_hld.c | 3
-rw-r--r--  lib/ioremap.c | 1
-rw-r--r--  lib/radix-tree.c | 2
-rw-r--r--  mm/filemap.c | 5
-rw-r--r--  mm/huge_memory.c | 18
-rw-r--r--  mm/kasan/report.c | 3
-rw-r--r--  mm/memcontrol.c | 4
-rw-r--r--  mm/memory_hotplug.c | 56
-rw-r--r--  mm/mempolicy.c | 2
-rw-r--r--  mm/page_alloc.c | 69
-rw-r--r--  mm/shmem.c | 11
-rw-r--r--  mm/slub.c | 23
-rw-r--r--  mm/zswap.c | 30
-rw-r--r--  net/batman-adv/fragmentation.c | 10
-rw-r--r--  net/bridge/br_netlink.c | 33
-rw-r--r--  net/can/af_can.c | 12
-rw-r--r--  net/can/af_can.h | 3
-rw-r--r--  net/can/bcm.c | 27
-rw-r--r--  net/can/gw.c | 2
-rw-r--r--  net/can/raw.c | 4
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/ethtool.c | 2
-rw-r--r--  net/core/lwt_bpf.c | 1
-rw-r--r--  net/core/lwtunnel.c | 66
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/dsa/slave.c | 8
-rw-r--r--  net/ipv4/fib_frontend.c | 8
-rw-r--r--  net/ipv4/ip_output.c | 1
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 7
-rw-r--r--  net/ipv4/netfilter/ipt_rpfilter.c | 8
-rw-r--r--  net/ipv4/netfilter/nf_reject_ipv4.c | 2
-rw-r--r--  net/ipv4/netfilter/nft_fib_ipv4.c | 15
-rw-r--r--  net/ipv4/tcp_fastopen.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/ila/ila_lwt.c | 1
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 2
-rw-r--r--  net/ipv6/ip6_gre.c | 3
-rw-r--r--  net/ipv6/ip6_output.c | 6
-rw-r--r--  net/ipv6/ip6_tunnel.c | 36
-rw-r--r--  net/ipv6/netfilter/ip6t_rpfilter.c | 8
-rw-r--r--  net/ipv6/netfilter/nf_reject_ipv6.c | 3
-rw-r--r--  net/ipv6/netfilter/nft_fib_ipv6.c | 13
-rw-r--r--  net/ipv6/route.c | 12
-rw-r--r--  net/ipv6/seg6.c | 2
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 4
-rw-r--r--  net/mac80211/rate.c | 2
-rw-r--r--  net/mpls/af_mpls.c | 48
-rw-r--r--  net/mpls/mpls_iptunnel.c | 1
-rw-r--r--  net/netfilter/Kconfig | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 44
-rw-r--r--  net/netfilter/nf_log.c | 1
-rw-r--r--  net/netfilter/nf_tables_api.c | 67
-rw-r--r--  net/netfilter/nft_dynset.c | 3
-rw-r--r--  net/netfilter/nft_log.c | 3
-rw-r--r--  net/netfilter/nft_lookup.c | 3
-rw-r--r--  net/netfilter/nft_objref.c | 6
-rw-r--r--  net/netfilter/nft_set_hash.c | 2
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 2
-rw-r--r--  net/packet/af_packet.c | 4
-rw-r--r--  net/sched/cls_flower.c | 4
-rw-r--r--  net/sched/cls_matchall.c | 127
-rw-r--r--  net/sctp/ipv6.c | 3
-rw-r--r--  net/sctp/offload.c | 2
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.c | 2
-rw-r--r--  net/sunrpc/clnt.c | 5
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 1
-rw-r--r--  net/tipc/node.c | 9
-rw-r--r--  net/tipc/server.c | 48
-rw-r--r--  net/tipc/subscr.c | 124
-rw-r--r--  net/tipc/subscr.h | 1
-rw-r--r--  net/unix/af_unix.c | 27
-rw-r--r--  samples/bpf/tc_l2_redirect_kern.c | 1
-rw-r--r--  samples/bpf/xdp_tx_iptunnel_kern.c | 1
-rw-r--r--  scripts/Makefile.build | 2
-rw-r--r--  scripts/genksyms/genksyms.c | 19
-rw-r--r--  scripts/kallsyms.c | 12
-rw-r--r--  scripts/mod/modpost.c | 10
-rw-r--r--  tools/objtool/arch/x86/decode.c | 2
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c | 53
518 files changed, 5106 insertions, 3392 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 0dcb7c7d3e40..944657684d73 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -15,6 +15,9 @@ Properties:
 Second cell specifies the irq distribution mode to cores
     0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
 
+The second cell in interrupts property is deprecated and may be ignored by
+the kernel.
+
 intc accessed via the special ARC AUX register interface, hence "reg" property
 is not specified.
 
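As an illustration of the binding after this change, a consumer node might look like the sketch below (the node name, unit address and IRQ number are hypothetical, not taken from the patch):

    some-device@f0000000 {
            interrupt-parent = <&idu_intc>;
            /* first cell selects the common IRQ; a second
             * (distribution mode) cell, if present, is now ignored */
            interrupts = <0 0>;
    };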
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index c010fafc66a8..c7194e87d5f4 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
 * Ethernet controller node
 
 Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the three frame engines interrupts in numeric
 	order. These are fe_int0, fe_int1 and fe_int2.
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index ff1bc4b1bb3b..fb5056b22685 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -19,8 +19,9 @@ Optional Properties:
 specifications. If neither of these are specified, the default is to
 assume clause 22.
 
-If the phy's identifier is known then the list may contain an entry
-of the form: "ethernet-phy-idAAAA.BBBB" where
+If the PHY reports an incorrect ID (or none at all) then the
+"compatible" list may contain an entry with the correct PHY ID in the
+form: "ethernet-phy-idAAAA.BBBB" where
    AAAA - The value of the 16 bit Phy Identifier 1 register as
           4 hex digits. This is the chip vendor OUI bits 3:18
    BBBB - The value of the 16 bit Phy Identifier 2 register as
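For illustration, a PHY node using such an entry might look like the sketch below (the ID values, the generic fallback compatible and the MDIO address are hypothetical examples, not taken from the patch):

    ethernet-phy@1 {
            /* Phy Identifier 1 = 0x0141, Identifier 2 = 0x0e90 */
            compatible = "ethernet-phy-id0141.0e90",
                         "ethernet-phy-ieee802.3-c22";
            reg = <1>;
    };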
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 72624a16b792..c94b4675d021 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
                              T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and file system UIDs
  Gid                         Real, effective, saved set, and file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
diff --git a/Documentation/media/uapi/cec/cec-func-close.rst b/Documentation/media/uapi/cec/cec-func-close.rst
index 8267c31b317d..895d9c2d1c04 100644
--- a/Documentation/media/uapi/cec/cec-func-close.rst
+++ b/Documentation/media/uapi/cec/cec-func-close.rst
@@ -33,11 +33,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 Closes the cec device. Resources associated with the file descriptor are
 freed. The device configuration remain unchanged.
 
diff --git a/Documentation/media/uapi/cec/cec-func-ioctl.rst b/Documentation/media/uapi/cec/cec-func-ioctl.rst
index 9e8dbb118d6a..7dcfd178fb24 100644
--- a/Documentation/media/uapi/cec/cec-func-ioctl.rst
+++ b/Documentation/media/uapi/cec/cec-func-ioctl.rst
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 The :c:func:`ioctl()` function manipulates cec device parameters. The
 argument ``fd`` must be an open file descriptor.
 
diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst
index af3f5b5c24c6..0304388cd159 100644
--- a/Documentation/media/uapi/cec/cec-func-open.rst
+++ b/Documentation/media/uapi/cec/cec-func-open.rst
@@ -46,11 +46,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To open a cec device applications call :c:func:`open()` with the
 desired device name. The function has no side effects; the device
 configuration remain unchanged.
diff --git a/Documentation/media/uapi/cec/cec-func-poll.rst b/Documentation/media/uapi/cec/cec-func-poll.rst
index cfb73e6027a5..6a863cfda6e0 100644
--- a/Documentation/media/uapi/cec/cec-func-poll.rst
+++ b/Documentation/media/uapi/cec/cec-func-poll.rst
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 With the :c:func:`poll()` function applications can wait for CEC
 events.
 
diff --git a/Documentation/media/uapi/cec/cec-intro.rst b/Documentation/media/uapi/cec/cec-intro.rst
index 4a19ea5323a9..07ee2b8f89d6 100644
--- a/Documentation/media/uapi/cec/cec-intro.rst
+++ b/Documentation/media/uapi/cec/cec-intro.rst
@@ -3,11 +3,6 @@
 Introduction
 ============
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 HDMI connectors provide a single pin for use by the Consumer Electronics
 Control protocol. This protocol allows different devices connected by an
 HDMI cable to communicate. The protocol for CEC version 1.4 is defined
@@ -31,3 +26,15 @@ control just the CEC pin.
 Drivers that support CEC will create a CEC device node (/dev/cecX) to
 give userspace access to the CEC adapter. The
 :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
+
+In order to check the support and test it, it is suggested to download
+the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
+provides three tools to handle CEC:
+
+- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
+  and monitor CEC messages.
+
+- cec-compliance: does a CEC compliance test of a remote CEC device to
+  determine how compliant the CEC implementation is.
+
+- cec-follower: emulates a CEC follower.
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
index 2b0ddb14b280..a0e961f11017 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
@@ -29,11 +29,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
 device information, applications call the ioctl with a pointer to a
 struct :c:type:`cec_caps`. The driver fills the structure and
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
index b878637e91b3..09f09bbe28d4 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
 struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
index 3357deb43c85..a3cdc75cec3e 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current physical address applications call
 :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
 driver stores the physical address.
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index e256c6605de7..6e589a1fae17 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -30,11 +30,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 CEC devices can send asynchronous events. These can be retrieved by
 calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
 non-blocking mode and no event is pending, then it will return -1 and
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
index 4f5818b9d277..e4ded9df0a84 100644
--- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
@@ -31,11 +31,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
 applications from stepping on each others toes it must be possible to
 obtain exclusive access to the CEC adapter. This ioctl sets the
diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst
index bdf015b1d1dc..dc2adb391c0a 100644
--- a/Documentation/media/uapi/cec/cec-ioc-receive.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst
@@ -34,11 +34,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To receive a CEC message the application has to fill in the
 ``timeout`` field of struct :c:type:`cec_msg` and pass it to
 :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 8a39ce45d8a0..008ecb588317 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line. On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
 
 The properties of all of the sleep states are described below.
 
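For example, a DT-based board could select the default sleep state through the kernel command line baked into its device tree; a minimal sketch (the console argument is only a placeholder):

    / {
            chosen {
                    /* "mem_sleep_default" takes "s2idle", "shallow" or "deep" */
                    bootargs = "console=ttyS0,115200 mem_sleep_default=s2idle";
            };
    };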
diff --git a/MAINTAINERS b/MAINTAINERS
index 26edd832c64e..187b9615e31a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3567,7 +3567,7 @@ F: drivers/infiniband/hw/cxgb3/
 F:	include/uapi/rdma/cxgb3-abi.h
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:	Hariprasad S <hariprasad@chelsio.com>
+M:	Ganesh Goudar <ganeshgr@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported
@@ -4100,12 +4100,18 @@ F: drivers/gpu/drm/bridge/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
 M:	Gerd Hoffmann <kraxel@redhat.com>
-S:	Odd Fixes
+L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
+S:	Maintained
 F:	drivers/gpu/drm/bochs/
 
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M:	Dave Airlie <airlied@redhat.com>
-S:	Odd Fixes
+M:	Gerd Hoffmann <kraxel@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
+S:	Obsolete
+W:	https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 F:	drivers/gpu/drm/cirrus/
 
 RADEON and AMDGPU DRM DRIVERS
@@ -4147,7 +4153,7 @@ F: Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:	Zhenyu Wang <zhenyuw@linux.intel.com>
 M:	Zhi Wang <zhi.a.wang@intel.com>
-L:	igvt-g-dev@lists.01.org
+L:	intel-gvt-dev@lists.freedesktop.org
 L:	intel-gfx@lists.freedesktop.org
 W:	https://01.org/igvt-g
 T:	git https://github.com/01org/gvt-linux.git
@@ -4298,7 +4304,10 @@ F: Documentation/devicetree/bindings/display/renesas,du.txt
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:	Dave Airlie <airlied@redhat.com>
-S:	Odd Fixes
+M:	Gerd Hoffmann <kraxel@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
+S:	Maintained
 F:	drivers/gpu/drm/qxl/
 F:	include/uapi/drm/qxl_drm.h
 
@@ -10186,7 +10195,6 @@ F: drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:	QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:	linux-wireless@vger.kernel.org
-L:	ath9k-devel@lists.ath9k.org
 W:	http://wireless.kernel.org/en/users/Drivers/ath9k
 S:	Supported
 F:	drivers/net/wireless/ath/ath9k/
@@ -13057,7 +13065,7 @@ F: drivers/input/serio/userio.c
 F:	include/uapi/linux/userio.h
 
 VIRTIO CONSOLE DRIVER
-M:	Amit Shah <amit.shah@redhat.com>
+M:	Amit Shah <amit@kernel.org>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	drivers/char/virtio_console.c
@@ -13092,6 +13100,7 @@ M: David Airlie <airlied@linux.ie>
 M:	Gerd Hoffmann <kraxel@redhat.com>
 L:	dri-devel@lists.freedesktop.org
 L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
 S:	Maintained
 F:	drivers/gpu/drm/virtio/
 F:	include/uapi/linux/virtio_gpu.h
@@ -13443,6 +13452,7 @@ F: arch/x86/
 
 X86 PLATFORM DRIVERS
 M:	Darren Hart <dvhart@infradead.org>
+M:	Andy Shevchenko <andy@infradead.org>
 L:	platform-driver-x86@vger.kernel.org
 T:	git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
 S:	Maintained
@@ -13614,6 +13624,7 @@ F: drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
 M:	Seth Jennings <sjenning@redhat.com>
+M:	Dan Streetman <ddstreet@ieee.org>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zbud.c
@@ -13669,6 +13680,7 @@ F: Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
 M:	Seth Jennings <sjenning@redhat.com>
+M:	Dan Streetman <ddstreet@ieee.org>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zswap.c
diff --git a/Makefile b/Makefile
index 098840012b9b..8e223e081c9d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Anniversary Edition
+EXTRAVERSION = -rc7
+NAME = Fearless Coyote
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
   KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
   KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index a36e8601114d..d5da2115d78a 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
26 " lp 1f \n" 26 " lp 1f \n"
27 " nop \n" 27 " nop \n"
28 "1: \n" 28 "1: \n"
29 : : "r"(loops)); 29 :
30 : "r"(loops)
31 : "lp_count");
30} 32}
31 33
32extern void __bad_udelay(void); 34extern void __bad_udelay(void);
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd867fdff..8b90d25a15cc 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@ ENTRY(stext)
 	GET_CPU_ID r5
 	cmp	r5, 0
 	mov.nz	r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-	; Non-Master can proceed as system would be booted sufficiently
-	jnz	first_lines_of_secondary
-#else
+	bz	.Lmaster_proceed
+
 	; Non-Masters wait for Master to boot enough and bring them up
-	jnz	arc_platform_smp_wait_to_boot
-#endif
-	; Master falls thru
+	; when they resume, tail-call to entry point
+	mov	blink, @first_lines_of_secondary
+	j	arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif
 
 	; Clear BSS before updating any globals
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 9274f8ade8c7..9f6b68fd4f3b 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
 
 	sprintf(smp_cpuinfo_buf,
-		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
 		mp.ver, mp.num_cores,
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
-		IS_AVAIL1(mp.llm, "LLM "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.gfrc, "GFRC"));
 
@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 	return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+	/*
+	 * By default send all common interrupts to all available online CPUs.
+	 * The affinity of common interrupts in IDU must be set manually since
+	 * in some cases the kernel will not call irq_set_affinity() by itself:
+	 *   1. When the kernel is not configured with support of SMP.
+	 *   2. When the kernel is configured with support of SMP but upper
+	 *      interrupt controllers does not support setting of the affinity
+	 *      and cannot propagate it to IDU.
+	 */
+	idu_irq_set_affinity(data, cpu_online_mask, false);
+	idu_irq_unmask(data);
+}
 
 static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_enable		= idu_irq_enable,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= idu_irq_set_affinity,
 #endif
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
 			 const u32 *intspec, unsigned int intsize,
 			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-	int distri = intspec[1];
-	unsigned long flags;
-
+	/*
+	 * Ignore value of interrupt distribution mode for common interrupts in
+	 * IDU which resides in intspec[1] since setting an affinity using value
+	 * from Device Tree is deprecated in ARC.
+	 */
+	*out_hwirq = intspec[0];
 	*out_type = IRQ_TYPE_NONE;
 
-	/* XXX: validate distribution scheme again online cpu mask */
-	if (distri == 0) {
-		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	} else {
-		/*
-		 * DEST based distribution for Level Triggered intr can only
-		 * have 1 CPU, so generalize it to always contain 1 cpu
-		 */
-		int cpu = ffs(distri);
-
-		if (cpu != fls(distri))
-			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-				hwirq, cpu);
-
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, cpu);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	}
-
 	return 0;
 }
 
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d972c9d..2afbafadb6ab 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)		f
+#define __boot_write(f, v)	f = v
+
+#else
+
+#define __boot_read(f)		arc_read_uncached_32(&f)
+#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
 	BUG_ON(cpu == 0);
-	wake_flag = cpu;
+
+	__boot_write(wake_flag, cpu);
 }
 
 void arc_platform_smp_wait_to_boot(int cpu)
 {
-	while (wake_flag != cpu)
+	/* for halt-on-reset, we've waited already */
+	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+		return;
+
+	while (__boot_read(wake_flag) != cpu)
 		;
 
-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
+	__boot_write(wake_flag, 0);
 }
 
-
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index abd961f3e763..91ebe382147f 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 	if (state.fault)
 		goto fault;
 
+	/* clear any remanants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index c53dbeae79f2..838dad5c209f 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
 	cbz	w6, .Lcbcencloop
 
 	ld1	{v0.16b}, [x5]			/* get iv */
-	enc_prepare	w3, x2, x5
+	enc_prepare	w3, x2, x6
 
 .Lcbcencloop:
 	ld1	{v1.16b}, [x1], #16		/* get next pt block */
 	eor	v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
-	encrypt_block	v0, w3, x2, x5, w6
+	encrypt_block	v0, w3, x2, x6, w7
 	st1	{v0.16b}, [x0], #16
 	subs	w4, w4, #1
 	bne	.Lcbcencloop
+	st1	{v0.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_encrypt)
 
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	cbz	w6, .LcbcdecloopNx
 
 	ld1	{v7.16b}, [x5]			/* get iv */
-	dec_prepare	w3, x2, x5
+	dec_prepare	w3, x2, x6
 
 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
 	ld1	{v1.16b}, [x1], #16		/* get next ct block */
 	mov	v0.16b, v1.16b			/* ...and copy to v0 */
-	decrypt_block	v0, w3, x2, x5, w6
+	decrypt_block	v0, w3, x2, x6, w7
 	eor	v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
 	mov	v7.16b, v1.16b			/* ct is next iv */
 	st1	{v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
 	bne	.Lcbcdecloop
 .Lcbcdecout:
 	FRAME_POP
+	st1	{v7.16b}, [x5]			/* return iv */
 	ret
 AES_ENDPROC(aes_cbc_decrypt)
 
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
 
 AES_ENTRY(aes_ctr_encrypt)
 	FRAME_PUSH
-	cbnz	w6, .Lctrfirst		/* 1st time around? */
-	umov	x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev	x5, x5
-#if INTERLEAVE >= 2
-	cmn	w5, w4			/* 32 bit overflow? */
-	bcs	.Lctrinc
-	add	x5, x5, #1		/* increment BE ctr */
-	b	.LctrincNx
-#else
-	b	.Lctrinc
-#endif
-.Lctrfirst:
+	cbz	w6, .Lctrnotfirst	/* 1st time around? */
 	enc_prepare	w3, x2, x6
 	ld1	{v4.16b}, [x5]
-	umov	x5, v4.d[1]		/* keep swabbed ctr in reg */
-	rev	x5, x5
+
+.Lctrnotfirst:
+	umov	x8, v4.d[1]		/* keep swabbed ctr in reg */
+	rev	x8, x8
 #if INTERLEAVE >= 2
-	cmn	w5, w4			/* 32 bit overflow? */
+	cmn	w8, w4			/* 32 bit overflow? */
 	bcs	.Lctrloop
 .LctrloopNx:
 	subs	w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
 	mov	v0.8b, v4.8b
 	mov	v1.8b, v4.8b
-	rev	x7, x5
-	add	x5, x5, #1
+	rev	x7, x8
+	add	x8, x8, #1
 	ins	v0.d[1], x7
-	rev	x7, x5
-	add	x5, x5, #1
+	rev	x7, x8
+	add	x8, x8, #1
 	ins	v1.d[1], x7
 	ld1	{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
 	do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
305 st1 {v0.16b-v1.16b}, [x0], #32 298 st1 {v0.16b-v1.16b}, [x0], #32
306#else 299#else
307 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */ 300 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
308 dup v7.4s, w5 301 dup v7.4s, w8
309 mov v0.16b, v4.16b 302 mov v0.16b, v4.16b
310 add v7.4s, v7.4s, v8.4s 303 add v7.4s, v7.4s, v8.4s
311 mov v1.16b, v4.16b 304 mov v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
323 eor v2.16b, v7.16b, v2.16b 316 eor v2.16b, v7.16b, v2.16b
324 eor v3.16b, v5.16b, v3.16b 317 eor v3.16b, v5.16b, v3.16b
325 st1 {v0.16b-v3.16b}, [x0], #64 318 st1 {v0.16b-v3.16b}, [x0], #64
326 add x5, x5, #INTERLEAVE 319 add x8, x8, #INTERLEAVE
327#endif 320#endif
328 cbz w4, .LctroutNx 321 rev x7, x8
329.LctrincNx:
330 rev x7, x5
331 ins v4.d[1], x7 322 ins v4.d[1], x7
323 cbz w4, .Lctrout
332 b .LctrloopNx 324 b .LctrloopNx
333.LctroutNx:
334 sub x5, x5, #1
335 rev x7, x5
336 ins v4.d[1], x7
337 b .Lctrout
338.Lctr1x: 325.Lctr1x:
339 adds w4, w4, #INTERLEAVE 326 adds w4, w4, #INTERLEAVE
340 beq .Lctrout 327 beq .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
342.Lctrloop: 329.Lctrloop:
343 mov v0.16b, v4.16b 330 mov v0.16b, v4.16b
344 encrypt_block v0, w3, x2, x6, w7 331 encrypt_block v0, w3, x2, x6, w7
332
333 adds x8, x8, #1 /* increment BE ctr */
334 rev x7, x8
335 ins v4.d[1], x7
336 bcs .Lctrcarry /* overflow? */
337
338.Lctrcarrydone:
345 subs w4, w4, #1 339 subs w4, w4, #1
346 bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */ 340 bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
347 ld1 {v3.16b}, [x1], #16 341 ld1 {v3.16b}, [x1], #16
348 eor v3.16b, v0.16b, v3.16b 342 eor v3.16b, v0.16b, v3.16b
349 st1 {v3.16b}, [x0], #16 343 st1 {v3.16b}, [x0], #16
350 beq .Lctrout 344 bne .Lctrloop
351.Lctrinc: 345
352 adds x5, x5, #1 /* increment BE ctr */ 346.Lctrout:
353 rev x7, x5 347 st1 {v4.16b}, [x5] /* return next CTR value */
354 ins v4.d[1], x7 348 FRAME_POP
355 bcc .Lctrloop /* no overflow? */ 349 ret
356 umov x7, v4.d[0] /* load upper word of ctr */ 350
357 rev x7, x7 /* ... to handle the carry */
358 add x7, x7, #1
359 rev x7, x7
360 ins v4.d[0], x7
361 b .Lctrloop
362.Lctrhalfblock: 351.Lctrhalfblock:
363 ld1 {v3.8b}, [x1] 352 ld1 {v3.8b}, [x1]
364 eor v3.8b, v0.8b, v3.8b 353 eor v3.8b, v0.8b, v3.8b
365 st1 {v3.8b}, [x0] 354 st1 {v3.8b}, [x0]
366.Lctrout:
367 FRAME_POP 355 FRAME_POP
368 ret 356 ret
357
358.Lctrcarry:
359 umov x7, v4.d[0] /* load upper word of ctr */
360 rev x7, x7 /* ... to handle the carry */
361 add x7, x7, #1
362 rev x7, x7
363 ins v4.d[0], x7
364 b .Lctrcarrydone
369AES_ENDPROC(aes_ctr_encrypt) 365AES_ENDPROC(aes_ctr_encrypt)
370 .ltorg 366 .ltorg
371 367
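The CTR changes above keep the low 64 bits of the counter swabbed in x8 and, when an increment carries out of them, divert through .Lctrcarry to bump the upper doubleword before rejoining at .Lctrcarrydone. The observable effect is a plain 128-bit big-endian increment of the counter block; a minimal C sketch of that semantic (illustrative only, not the kernel's implementation):

        /* increment a 16-byte big-endian counter, rippling the carry */
        static void ctr_inc_be128(unsigned char ctr[16])
        {
                int i;

                for (i = 15; i >= 0; i--)
                        if (++ctr[i] != 0)      /* stop once a byte doesn't wrap */
                                break;
        }

The assembly only special-cases the carry because the hot path wants the low half cached in a general-purpose register; storing the next counter back through [x5] on exit is what lets one request be split across several calls.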
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 23e9e13bd2aa..655e65f38f31 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -11,6 +11,7 @@
11 * for more details. 11 * for more details.
12 */ 12 */
13 13
14#include <linux/acpi.h>
14#include <linux/cpu.h> 15#include <linux/cpu.h>
15#include <linux/cpumask.h> 16#include <linux/cpumask.h>
16#include <linux/init.h> 17#include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {
209 210
210static int __init register_cpufreq_notifier(void) 211static int __init register_cpufreq_notifier(void)
211{ 212{
212 if (cap_parsing_failed) 213 /*
214 * on ACPI-based systems we need to use the default cpu capacity
215 * until we have the necessary code to parse the cpu capacity, so
216 * skip registering cpufreq notifier.
217 */
218 if (!acpi_disabled || cap_parsing_failed)
213 return -EINVAL; 219 return -EINVAL;
214 220
215 if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) { 221 if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e264fc7..e93c9494503a 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
139#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) 139#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
140#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) 140#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
141#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) 141#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
142 142#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
143 143
144#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) 144#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
145#define atomic_xchg(v, new) (xchg(&(v)->counter, new)) 145#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
161 return c; 161 return c;
162} 162}
163 163
164static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
165{
166 long long c, old;
167
168 c = atomic64_read(v);
169 for (;;) {
170 if (unlikely(c == u))
171 break;
172 old = atomic64_cmpxchg(v, c, c + i);
173 if (likely(old == c))
174 break;
175 c = old;
176 }
177 return c != u;
178}
179
180static inline long long atomic64_dec_if_positive(atomic64_t *v)
181{
182 long long c, old, dec;
183
184 c = atomic64_read(v);
185 for (;;) {
186 dec = c - 1;
187 if (unlikely(dec < 0))
188 break;
189 old = atomic64_cmpxchg((v), c, dec);
190 if (likely(old == c))
191 break;
192 c = old;
193 }
194 return dec;
195}
196
164#define ATOMIC_OP(op) \ 197#define ATOMIC_OP(op) \
165static inline int atomic_fetch_##op(int i, atomic_t *v) \ 198static inline int atomic_fetch_##op(int i, atomic_t *v) \
166{ \ 199{ \
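The frv additions are the canonical compare-and-swap retry loop: read the counter, bail out at the boundary value, otherwise attempt to install the update and restart from whatever value the cmpxchg actually observed. A hedged userspace equivalent using the GCC/Clang __atomic builtins (the kernel code above uses atomic64_read()/atomic64_cmpxchg() instead):

        #include <stdbool.h>

        /* add 'a' to *v unless *v == u; true if the add happened */
        static bool add_unless64(long long *v, long long a, long long u)
        {
                long long c = __atomic_load_n(v, __ATOMIC_RELAXED);

                while (c != u) {
                        /* on failure, c is refreshed with the current value */
                        if (__atomic_compare_exchange_n(v, &c, c + a, false,
                                                        __ATOMIC_SEQ_CST,
                                                        __ATOMIC_SEQ_CST))
                                break;
                }
                return c != u;
        }

atomic64_dec_if_positive() is the same loop with "dec < 0" as the bail-out, and the new atomic64_inc_not_zero() is simply atomic64_add_unless(v, 1, 0).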
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311735c8..67e333aa7629 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
16struct task_struct; 16struct task_struct;
17struct thread_struct; 17struct thread_struct;
18 18
19#if !defined(CONFIG_LAZY_SAVE_FPU) 19#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
20struct fpu_state_struct; 20struct fpu_state_struct;
21extern asmlinkage void fpu_save(struct fpu_state_struct *); 21extern asmlinkage void fpu_save(struct fpu_state_struct *);
22#define switch_fpu(prev, next) \ 22#define switch_fpu(prev, next) \
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 3f9406d9b9d6..da87943328a5 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -6,7 +6,7 @@
6#endif 6#endif
7 7
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */ 9#include <asm/types.h>
10#include <asm/byteorder.h> 10#include <asm/byteorder.h>
11#include <asm/barrier.h> 11#include <asm/barrier.h>
12#include <linux/atomic.h> 12#include <linux/atomic.h>
@@ -17,6 +17,12 @@
17 * to include/asm-i386/bitops.h or kerneldoc 17 * to include/asm-i386/bitops.h or kerneldoc
18 */ 18 */
19 19
20#if __BITS_PER_LONG == 64
21#define SHIFT_PER_LONG 6
22#else
23#define SHIFT_PER_LONG 5
24#endif
25
20#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) 26#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
21 27
22 28
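SHIFT_PER_LONG is log2(BITS_PER_LONG) — 6 on 64-bit, 5 on 32-bit — and together with CHOP_SHIFTCOUNT() it splits a bit index into a word index plus an offset within the word. The arithmetic, assuming a 64-bit build:

        unsigned long bit  = 70;
        unsigned long word = bit >> SHIFT_PER_LONG;     /* 70 / 64 = 1 */
        unsigned long off  = bit & (BITS_PER_LONG - 1); /* 70 % 64 = 6 */

Deriving it from __BITS_PER_LONG inside asm/bitops.h keeps the constant kernel-internal, which is why the uapi header in the next diff can stop defining it.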
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
index e0a23c7bdd43..07fa7e50bdc0 100644
--- a/arch/parisc/include/uapi/asm/bitsperlong.h
+++ b/arch/parisc/include/uapi/asm/bitsperlong.h
@@ -3,10 +3,8 @@
3 3
4#if defined(__LP64__) 4#if defined(__LP64__)
5#define __BITS_PER_LONG 64 5#define __BITS_PER_LONG 64
6#define SHIFT_PER_LONG 6
7#else 6#else
8#define __BITS_PER_LONG 32 7#define __BITS_PER_LONG 32
9#define SHIFT_PER_LONG 5
10#endif 8#endif
11 9
12#include <asm-generic/bitsperlong.h> 10#include <asm-generic/bitsperlong.h>
diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
index e78403b129ef..928e1bbac98f 100644
--- a/arch/parisc/include/uapi/asm/swab.h
+++ b/arch/parisc/include/uapi/asm/swab.h
@@ -1,6 +1,7 @@
1#ifndef _PARISC_SWAB_H 1#ifndef _PARISC_SWAB_H
2#define _PARISC_SWAB_H 2#define _PARISC_SWAB_H
3 3
4#include <asm/bitsperlong.h>
4#include <linux/types.h> 5#include <linux/types.h>
5#include <linux/compiler.h> 6#include <linux/compiler.h>
6 7
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
38} 39}
39#define __arch_swab32 __arch_swab32 40#define __arch_swab32 __arch_swab32
40 41
41#if BITS_PER_LONG > 32 42#if __BITS_PER_LONG > 32
42/* 43/*
43** From "PA-RISC 2.0 Architecture", HP Professional Books. 44** From "PA-RISC 2.0 Architecture", HP Professional Books.
44** See Appendix I page 8 , "Endian Byte Swapping". 45** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
61 return x; 62 return x;
62} 63}
63#define __arch_swab64 __arch_swab64 64#define __arch_swab64 __arch_swab64
64#endif /* BITS_PER_LONG > 32 */ 65#endif /* __BITS_PER_LONG > 32 */
65 66
66#endif /* _PARISC_SWAB_H */ 67#endif /* _PARISC_SWAB_H */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a8ee573fe610..281f4f1fcd1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -164,7 +164,6 @@ config PPC
164 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE 164 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
165 select HAVE_ARCH_HARDENED_USERCOPY 165 select HAVE_ARCH_HARDENED_USERCOPY
166 select HAVE_KERNEL_GZIP 166 select HAVE_KERNEL_GZIP
167 select HAVE_CC_STACKPROTECTOR
168 167
169config GENERIC_CSUM 168config GENERIC_CSUM
170 def_bool CPU_LITTLE_ENDIAN 169 def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
484 bool "Build a relocatable kernel" 483 bool "Build a relocatable kernel"
485 depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE)) 484 depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
486 select NONSTATIC_KERNEL 485 select NONSTATIC_KERNEL
486 select MODULE_REL_CRCS if MODVERSIONS
487 help 487 help
488 This builds a kernel image that is capable of running at the 488 This builds a kernel image that is capable of running at the
 489 location the kernel is loaded at. For ppc32, there are no 489 location the kernel is loaded at. For ppc32, there are no
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index b312b152461b..6e834caa3720 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
23{ 23{
24 int i; 24 int i;
25 25
26#ifndef __clang__ /* clang can't cope with this */
26 BUILD_BUG_ON(!__builtin_constant_p(feature)); 27 BUILD_BUG_ON(!__builtin_constant_p(feature));
28#endif
27 29
28#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG 30#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
29 if (!static_key_initialized) { 31 if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index a34c764ca8dd..233a7e8cc8e3 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
160{ 160{
161 int i; 161 int i;
162 162
163#ifndef __clang__ /* clang can't cope with this */
163 BUILD_BUG_ON(!__builtin_constant_p(feature)); 164 BUILD_BUG_ON(!__builtin_constant_p(feature));
165#endif
164 166
165#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG 167#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
166 if (!static_key_initialized) { 168 if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cc12c61ef315..53885512b8d3 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
90} 90}
91#endif 91#endif
92 92
93#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
94#define ARCH_RELOCATES_KCRCTAB
95#define reloc_start PHYSICAL_START
96#endif
97#endif /* __KERNEL__ */ 93#endif /* __KERNEL__ */
98#endif /* _ASM_POWERPC_MODULE_H */ 94#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644
index 6720190eabec..000000000000
--- a/arch/powerpc/include/asm/stackprotector.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * GCC stack protector support.
3 *
4 * Stack protector works by putting predefined pattern at the start of
5 * the stack frame and verifying that it hasn't been overwritten when
6 * returning from the function. The pattern is called stack canary
7 * and gcc expects it to be defined by a global variable called
8 * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
9 * we cannot have a different canary value per task.
10 */
11
12#ifndef _ASM_STACKPROTECTOR_H
13#define _ASM_STACKPROTECTOR_H
14
15#include <linux/random.h>
16#include <linux/version.h>
17#include <asm/reg.h>
18
19extern unsigned long __stack_chk_guard;
20
21/*
22 * Initialize the stackprotector canary value.
23 *
24 * NOTE: this must only be called from functions that never return,
25 * and it must always be inlined.
26 */
27static __always_inline void boot_init_stack_canary(void)
28{
29 unsigned long canary;
30
31 /* Try to get a semi random initial value. */
32 get_random_bytes(&canary, sizeof(canary));
33 canary ^= mftb();
34 canary ^= LINUX_VERSION_CODE;
35
36 current->stack_canary = canary;
37 __stack_chk_guard = current->stack_canary;
38}
39
40#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 23f8082d7bfa..f4c2b52e58b3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
19CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) 19CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
20CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) 20CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
21 21
22# -fstack-protector triggers protection checks in this code,
23# but it is being used too early to link to meaningful stack_chk logic.
24CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
25
26ifdef CONFIG_FUNCTION_TRACER 22ifdef CONFIG_FUNCTION_TRACER
27# Do not trace early boot code 23# Do not trace early boot code
28CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) 24CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0601e6a7297c..195a9fc8f81c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,9 +91,6 @@ int main(void)
91 DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp)); 91 DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
92#endif 92#endif
93 93
94#ifdef CONFIG_CC_STACKPROTECTOR
95 DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
96#endif
97 DEFINE(KSP, offsetof(struct thread_struct, ksp)); 94 DEFINE(KSP, offsetof(struct thread_struct, ksp));
98 DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); 95 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
99#ifdef CONFIG_BOOKE 96#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d88573bdd090..b94887165a10 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
545static void *__eeh_clear_pe_frozen_state(void *data, void *flag) 545static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
546{ 546{
547 struct eeh_pe *pe = (struct eeh_pe *)data; 547 struct eeh_pe *pe = (struct eeh_pe *)data;
548 bool *clear_sw_state = flag; 548 bool clear_sw_state = *(bool *)flag;
549 int i, rc = 1; 549 int i, rc = 1;
550 550
551 for (i = 0; rc && i < 3; i++) 551 for (i = 0; rc && i < 3; i++)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 5742dbdbee46..3841d749a430 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
674 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */ 674 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
675END_FTR_SECTION_IFSET(CPU_FTR_SPE) 675END_FTR_SECTION_IFSET(CPU_FTR_SPE)
676#endif /* CONFIG_SPE */ 676#endif /* CONFIG_SPE */
677#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 677
678 lwz r0,TSK_STACK_CANARY(r2)
679 lis r4,__stack_chk_guard@ha
680 stw r0,__stack_chk_guard@l(r4)
681#endif
682 lwz r0,_CCR(r1) 678 lwz r0,_CCR(r1)
683 mtcrf 0xFF,r0 679 mtcrf 0xFF,r0
684 /* r3-r12 are destroyed -- Cort */ 680 /* r3-r12 are destroyed -- Cort */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index bb1807184bad..0b0f89685b67 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
286 for (end = (void *)vers + size; vers < end; vers++) 286 for (end = (void *)vers + size; vers < end; vers++)
287 if (vers->name[0] == '.') { 287 if (vers->name[0] == '.') {
288 memmove(vers->name, vers->name+1, strlen(vers->name)); 288 memmove(vers->name, vers->name+1, strlen(vers->name));
289#ifdef ARCH_RELOCATES_KCRCTAB
290 /* The TOC symbol has no CRC computed. To avoid CRC
291 * check failing, we must force it to the expected
292 * value (see CRC check in module.c).
293 */
294 if (!strcmp(vers->name, "TOC."))
295 vers->crc = -(unsigned long)reloc_start;
296#endif
297 } 289 }
298} 290}
299 291
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 04885cec24df..5dd056df0baa 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -64,12 +64,6 @@
64#include <linux/kprobes.h> 64#include <linux/kprobes.h>
65#include <linux/kdebug.h> 65#include <linux/kdebug.h>
66 66
67#ifdef CONFIG_CC_STACKPROTECTOR
68#include <linux/stackprotector.h>
69unsigned long __stack_chk_guard __read_mostly;
70EXPORT_SYMBOL(__stack_chk_guard);
71#endif
72
73/* Transactional Memory debug */ 67/* Transactional Memory debug */
74#ifdef TM_DEBUG_SW 68#ifdef TM_DEBUG_SW
75#define TM_DEBUG(x...) printk(KERN_INFO x) 69#define TM_DEBUG(x...) printk(KERN_INFO x)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ec47a939cbdd..ac83eb04a8b8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
2834 2834
2835 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 2835 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2836 2836
2837 if (!PHANDLE_VALID(cpu_pkg))
2838 return;
2839
2837 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); 2840 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2838 prom.cpu = be32_to_cpu(rval); 2841 prom.cpu = be32_to_cpu(rval);
2839 2842
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index cfa53ccc8baf..34f1a0dbc898 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
65 if (!pmdp) 65 if (!pmdp)
66 return -ENOMEM; 66 return -ENOMEM;
67 if (map_page_size == PMD_SIZE) { 67 if (map_page_size == PMD_SIZE) {
68 ptep = (pte_t *)pudp; 68 ptep = pmdp_ptep(pmdp);
69 goto set_the_pte; 69 goto set_the_pte;
70 } 70 }
71 ptep = pte_alloc_kernel(pmdp, ea); 71 ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
90 } 90 }
91 pmdp = pmd_offset(pudp, ea); 91 pmdp = pmd_offset(pudp, ea);
92 if (map_page_size == PMD_SIZE) { 92 if (map_page_size == PMD_SIZE) {
93 ptep = (pte_t *)pudp; 93 ptep = pmdp_ptep(pmdp);
94 goto set_the_pte; 94 goto set_the_pte;
95 } 95 }
96 if (!pmd_present(*pmdp)) { 96 if (!pmd_present(*pmdp)) {
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7447ba509c30..12020b55887b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
963 if (target == current) 963 if (target == current)
964 save_fpu_regs(); 964 save_fpu_regs();
965 965
966 if (MACHINE_HAS_VX)
967 convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
968 else
969 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
970
966 /* If setting FPC, must validate it first. */ 971 /* If setting FPC, must validate it first. */
967 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { 972 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
968 u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; 973 u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
1067 if (target == current) 1072 if (target == current)
1068 save_fpu_regs(); 1073 save_fpu_regs();
1069 1074
1075 for (i = 0; i < __NUM_VXRS_LOW; i++)
1076 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1077
1070 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); 1078 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1071 if (rc == 0) 1079 if (rc == 0)
1072 for (i = 0; i < __NUM_VXRS_LOW; i++) 1080 for (i = 0; i < __NUM_VXRS_LOW; i++)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7a1897c51c54..d56ef26d4681 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
202 return pgste; 202 return pgste;
203} 203}
204 204
205static inline void ptep_xchg_commit(struct mm_struct *mm, 205static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
206 unsigned long addr, pte_t *ptep, 206 unsigned long addr, pte_t *ptep,
207 pgste_t pgste, pte_t old, pte_t new) 207 pgste_t pgste, pte_t old, pte_t new)
208{ 208{
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
220 } else { 220 } else {
221 *ptep = new; 221 *ptep = new;
222 } 222 }
223 return old;
223} 224}
224 225
225pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr, 226pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
231 preempt_disable(); 232 preempt_disable();
232 pgste = ptep_xchg_start(mm, addr, ptep); 233 pgste = ptep_xchg_start(mm, addr, ptep);
233 old = ptep_flush_direct(mm, addr, ptep); 234 old = ptep_flush_direct(mm, addr, ptep);
234 ptep_xchg_commit(mm, addr, ptep, pgste, old, new); 235 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
235 preempt_enable(); 236 preempt_enable();
236 return old; 237 return old;
237} 238}
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
246 preempt_disable(); 247 preempt_disable();
247 pgste = ptep_xchg_start(mm, addr, ptep); 248 pgste = ptep_xchg_start(mm, addr, ptep);
248 old = ptep_flush_lazy(mm, addr, ptep); 249 old = ptep_flush_lazy(mm, addr, ptep);
249 ptep_xchg_commit(mm, addr, ptep, pgste, old, new); 250 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
250 preempt_enable(); 251 preempt_enable();
251 return old; 252 return old;
252} 253}
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b84be675e507..d0317993e947 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
35static inline void tsb_context_switch(struct mm_struct *mm) 35static inline void tsb_context_switch(struct mm_struct *mm)
36{ 36{
37 __tsb_context_switch(__pa(mm->pgd), 37 __tsb_context_switch(__pa(mm->pgd),
38 &mm->context.tsb_block[0], 38 &mm->context.tsb_block[MM_TSB_BASE],
39#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 39#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
40 (mm->context.tsb_block[1].tsb ? 40 (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
41 &mm->context.tsb_block[1] : 41 &mm->context.tsb_block[MM_TSB_HUGE] :
42 NULL) 42 NULL)
43#else 43#else
44 NULL 44 NULL
45#endif 45#endif
46 , __pa(&mm->context.tsb_descr[0])); 46 , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
47} 47}
48 48
49void tsb_grow(struct mm_struct *mm, 49void tsb_grow(struct mm_struct *mm,
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 3bebf395252c..4d0248aa0928 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
1021 unsigned long order = get_order(size); 1021 unsigned long order = get_order(size);
1022 unsigned long p; 1022 unsigned long p;
1023 1023
1024 p = __get_free_pages(GFP_KERNEL, order); 1024 p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1025 if (!p) { 1025 if (!p) {
1026 prom_printf("SUN4V: Error, cannot allocate queue.\n"); 1026 prom_printf("SUN4V: Error, cannot allocate queue.\n");
1027 prom_halt(); 1027 prom_halt();
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c
index c59af546f522..3caed4023589 100644
--- a/arch/sparc/kernel/sstate.c
+++ b/arch/sparc/kernel/sstate.c
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
43 "Linux powering off"; 43 "Linux powering off";
44static const char rebooting_msg[32] __attribute__((aligned(32))) = 44static const char rebooting_msg[32] __attribute__((aligned(32))) =
45 "Linux rebooting"; 45 "Linux rebooting";
46static const char panicing_msg[32] __attribute__((aligned(32))) = 46static const char panicking_msg[32] __attribute__((aligned(32))) =
47 "Linux panicing"; 47 "Linux panicking";
48 48
49static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused) 49static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
50{ 50{
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
76 76
77static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr) 77static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
78{ 78{
79 do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg); 79 do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
80 80
81 return NOTIFY_DONE; 81 return NOTIFY_DONE;
82} 82}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4bc10e44d1ca..dfc97a47c9a0 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
2051 atomic_inc(&sun4v_resum_oflow_cnt); 2051 atomic_inc(&sun4v_resum_oflow_cnt);
2052} 2052}
2053 2053
 2054/* Given a set of registers, get the virtual address that was being accessed
 2055 * by the faulting instruction at tpc.
2056 */
2057static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
2058{
2059 unsigned int insn;
2060
2061 if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
2062 return compute_effective_address(regs, insn,
2063 (insn >> 25) & 0x1f);
2064 }
2065 return 0;
2066}
2067
2068/* Attempt to handle non-resumable errors generated from userspace.
2069 * Returns true if the signal was handled, false otherwise.
2070 */
2071bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
 2072 struct sun4v_error_entry *ent)
 2073{
2074 unsigned int attrs = ent->err_attrs;
2075
2076 if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
2077 unsigned long addr = ent->err_raddr;
2078 siginfo_t info;
2079
2080 if (addr == ~(u64)0) {
2081 /* This seems highly unlikely to ever occur */
2082 pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
2083 } else {
2084 unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
2085 PAGE_SIZE);
2086
2087 /* Break the unfortunate news. */
2088 pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
2089 addr);
 2090 pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu pages.\n",
2091 page_cnt);
2092
2093 while (page_cnt-- > 0) {
2094 if (pfn_valid(addr >> PAGE_SHIFT))
2095 get_page(pfn_to_page(addr >> PAGE_SHIFT));
2096 addr += PAGE_SIZE;
2097 }
2098 }
2099 info.si_signo = SIGKILL;
2100 info.si_errno = 0;
2101 info.si_trapno = 0;
2102 force_sig_info(info.si_signo, &info, current);
2103
2104 return true;
2105 }
2106 if (attrs & SUN4V_ERR_ATTRS_PIO) {
2107 siginfo_t info;
2108
2109 info.si_signo = SIGBUS;
2110 info.si_code = BUS_ADRERR;
2111 info.si_addr = (void __user *)sun4v_get_vaddr(regs);
2112 force_sig_info(info.si_signo, &info, current);
2113
2114 return true;
2115 }
2116
2117 /* Default to doing nothing */
2118 return false;
2119}
2120
2054/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. 2121/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
2055 * Log the event, clear the first word of the entry, and die. 2122 * Log the event, clear the first word of the entry, and die.
2056 */ 2123 */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
2075 2142
2076 put_cpu(); 2143 put_cpu();
2077 2144
2145 if (!(regs->tstate & TSTATE_PRIV) &&
2146 sun4v_nonresum_error_user_handled(regs, &local_copy)) {
2147 /* DON'T PANIC: This userspace error was handled. */
2148 return;
2149 }
2150
2078#ifdef CONFIG_PCI 2151#ifdef CONFIG_PCI
2079 /* Check for the special PCI poke sequence. */ 2152 /* Check for the special PCI poke sequence. */
2080 if (pci_poke_in_progress && pci_poke_cpu == cpu) { 2153 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
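sun4v_get_vaddr() rebuilds the faulting virtual address by reading the user instruction at tpc and recomputing its effective address; the (insn >> 25) & 0x1f in the call extracts bits 29..25 of the instruction word, the rd field common to the SPARC load/store formats. As a self-contained sketch:

        /* bits 29..25 of a SPARC instruction word hold the rd field */
        static unsigned int sparc_rd_field(unsigned int insn)
        {
                return (insn >> 25) & 0x1f;
        }

From there the handler splits by error attribute: memory errors pin every affected page with get_page() so the corrupted frames are never handed out again and the task gets SIGKILL, while PIO errors deliver SIGBUS carrying the recovered address.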
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index d89b7011667c..e279572824b1 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
111 const void *kbuf, const void __user *ubuf) 111 const void *kbuf, const void __user *ubuf)
112{ 112{
113 int ret; 113 int ret;
114 struct pt_regs regs; 114 struct pt_regs regs = *task_pt_regs(target);
115 115
116 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, 116 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
117 sizeof(regs)); 117 sizeof(regs));
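This tile fix and the s390 ptrace change earlier in the series share one rule for regset .set handlers: seed the kernel-side buffer with the target's current register state before user_regset_copyin(), since a partial write (pos/count covering only some registers) must leave the remaining registers at their old values instead of uninitialized stack contents. Schematically, using the kernel API as it appears in the diffs:

        struct pt_regs regs = *task_pt_regs(target);    /* seed with live values */
        int ret;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &regs, 0, sizeof(regs));
        if (!ret)
                *task_pt_regs(target) = regs;           /* commit the merged set */

Without the seeding, whatever happened to be on the kernel stack could end up installed in the tracee's registers.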
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 6ef688a1ef3e..7ff1b0c86a8e 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
1085 aesni_simd_skciphers[i]; i++) 1085 aesni_simd_skciphers[i]; i++)
1086 simd_skcipher_free(aesni_simd_skciphers[i]); 1086 simd_skcipher_free(aesni_simd_skciphers[i]);
1087 1087
1088 for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) && 1088 for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
1089 aesni_simd_skciphers2[i].simd; i++) 1089 if (aesni_simd_skciphers2[i].simd)
1090 simd_skcipher_free(aesni_simd_skciphers2[i].simd); 1090 simd_skcipher_free(aesni_simd_skciphers2[i].simd);
1091} 1091}
1092 1092
1093static int __init aesni_init(void) 1093static int __init aesni_init(void)
@@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
1168 simd = simd_skcipher_create_compat(algname, drvname, basename); 1168 simd = simd_skcipher_create_compat(algname, drvname, basename);
1169 err = PTR_ERR(simd); 1169 err = PTR_ERR(simd);
1170 if (IS_ERR(simd)) 1170 if (IS_ERR(simd))
1171 goto unregister_simds; 1171 continue;
1172 1172
1173 aesni_simd_skciphers2[i].simd = simd; 1173 aesni_simd_skciphers2[i].simd = simd;
1174 } 1174 }
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 17c3564d087a..22ef4f72cf32 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
161 161
162static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) 162static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
163{ 163{
164 return rapl_pmus->pmus[topology_logical_package_id(cpu)]; 164 unsigned int pkgid = topology_logical_package_id(cpu);
165
166 /*
 167 * The unsigned check also catches the '-1' return value for non-
168 * existent mappings in the topology map.
169 */
170 return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
165} 171}
166 172
167static inline u64 rapl_read_counter(struct perf_event *event) 173static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
402 408
403 /* must be done before validate_group */ 409 /* must be done before validate_group */
404 pmu = cpu_to_rapl_pmu(event->cpu); 410 pmu = cpu_to_rapl_pmu(event->cpu);
411 if (!pmu)
412 return -EINVAL;
405 event->cpu = pmu->cpu; 413 event->cpu = pmu->cpu;
406 event->pmu_private = pmu; 414 event->pmu_private = pmu;
407 event->hw.event_base = msr; 415 event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
585 struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); 593 struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
586 int target; 594 int target;
587 595
596 if (!pmu) {
597 pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
598 if (!pmu)
599 return -ENOMEM;
600
601 raw_spin_lock_init(&pmu->lock);
602 INIT_LIST_HEAD(&pmu->active_list);
603 pmu->pmu = &rapl_pmus->pmu;
604 pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
605 rapl_hrtimer_init(pmu);
606
607 rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
608 }
609
588 /* 610 /*
589 * Check if there is an online cpu in the package which collects rapl 611 * Check if there is an online cpu in the package which collects rapl
590 * events already. 612 * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
598 return 0; 620 return 0;
599} 621}
600 622
601static int rapl_cpu_prepare(unsigned int cpu)
602{
603 struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
604
605 if (pmu)
606 return 0;
607
608 pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
609 if (!pmu)
610 return -ENOMEM;
611
612 raw_spin_lock_init(&pmu->lock);
613 INIT_LIST_HEAD(&pmu->active_list);
614 pmu->pmu = &rapl_pmus->pmu;
615 pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
616 pmu->cpu = -1;
617 rapl_hrtimer_init(pmu);
618 rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
619 return 0;
620}
621
622static int rapl_check_hw_unit(bool apply_quirk) 623static int rapl_check_hw_unit(bool apply_quirk)
623{ 624{
624 u64 msr_rapl_power_unit_bits; 625 u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
803 /* 804 /*
804 * Install callbacks. Core will call them for each online cpu. 805 * Install callbacks. Core will call them for each online cpu.
805 */ 806 */
806
807 ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
808 rapl_cpu_prepare, NULL);
809 if (ret)
810 goto out;
811
812 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, 807 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
813 "perf/x86/rapl:online", 808 "perf/x86/rapl:online",
814 rapl_cpu_online, rapl_cpu_offline); 809 rapl_cpu_online, rapl_cpu_offline);
815 if (ret) 810 if (ret)
816 goto out1; 811 goto out;
817 812
818 ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); 813 ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
819 if (ret) 814 if (ret)
820 goto out2; 815 goto out1;
821 816
822 rapl_advertise(); 817 rapl_advertise();
823 return 0; 818 return 0;
824 819
825out2:
826 cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
827out1: 820out1:
828 cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP); 821 cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
829out: 822out:
830 pr_warn("Initialization failed (%d), disabled\n", ret); 823 pr_warn("Initialization failed (%d), disabled\n", ret);
831 cleanup_rapl_pmus(); 824 cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
836static void __exit intel_rapl_exit(void) 829static void __exit intel_rapl_exit(void)
837{ 830{
838 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); 831 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
839 cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
840 perf_pmu_unregister(&rapl_pmus->pmu); 832 perf_pmu_unregister(&rapl_pmus->pmu);
841 cleanup_rapl_pmus(); 833 cleanup_rapl_pmus();
842} 834}
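The new cpu_to_rapl_pmu() bounds check — and the identical uncore_pmu_to_box() check in the next diff — leans on unsigned wraparound: topology_logical_package_id() reports a missing mapping as -1, which an unsigned int stores as UINT_MAX, so a single "pkgid < maxpkg" comparison rejects both out-of-range packages and the sentinel. Illustrated with stand-in names (maxpkg and pmus here play the role of rapl_pmus->maxpkg and rapl_pmus->pmus):

        unsigned int pkgid = (unsigned int)-1;  /* the 'no mapping' sentinel */
        struct rapl_pmu *pmu;

        if (pkgid < maxpkg)     /* never true here: 0xffffffff */
                pmu = pmus[pkgid];
        else
                pmu = NULL;     /* callers must now tolerate NULL */

That NULL is exactly what the added "!pmu" checks in rapl_pmu_event_init() and rapl_cpu_online() absorb.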
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 8c4ccdc3a3f3..1ab45976474d 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
100 100
101struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) 101struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
102{ 102{
103 return pmu->boxes[topology_logical_package_id(cpu)]; 103 unsigned int pkgid = topology_logical_package_id(cpu);
104
105 /*
 106 * The unsigned check also catches the '-1' return value for non-
107 * existent mappings in the topology map.
108 */
109 return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
104} 110}
105 111
106u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) 112u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
764 pmu->registered = false; 770 pmu->registered = false;
765} 771}
766 772
767static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
768{
769 struct intel_uncore_pmu *pmu = type->pmus;
770 struct intel_uncore_box *box;
771 int i, pkg;
772
773 if (pmu) {
774 pkg = topology_physical_package_id(cpu);
775 for (i = 0; i < type->num_boxes; i++, pmu++) {
776 box = pmu->boxes[pkg];
777 if (box)
778 uncore_box_exit(box);
779 }
780 }
781}
782
783static void uncore_exit_boxes(void *dummy)
784{
785 struct intel_uncore_type **types;
786
787 for (types = uncore_msr_uncores; *types; types++)
788 __uncore_exit_boxes(*types++, smp_processor_id());
789}
790
791static void uncore_free_boxes(struct intel_uncore_pmu *pmu) 773static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
792{ 774{
793 int pkg; 775 int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
1058 } 1040 }
1059} 1041}
1060 1042
1061static int uncore_cpu_dying(unsigned int cpu)
1062{
1063 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1064 struct intel_uncore_pmu *pmu;
1065 struct intel_uncore_box *box;
1066 int i, pkg;
1067
1068 pkg = topology_logical_package_id(cpu);
1069 for (; *types; types++) {
1070 type = *types;
1071 pmu = type->pmus;
1072 for (i = 0; i < type->num_boxes; i++, pmu++) {
1073 box = pmu->boxes[pkg];
1074 if (box && atomic_dec_return(&box->refcnt) == 0)
1075 uncore_box_exit(box);
1076 }
1077 }
1078 return 0;
1079}
1080
1081static int first_init;
1082
1083static int uncore_cpu_starting(unsigned int cpu)
1084{
1085 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1086 struct intel_uncore_pmu *pmu;
1087 struct intel_uncore_box *box;
1088 int i, pkg, ncpus = 1;
1089
1090 if (first_init) {
1091 /*
1092 * On init we get the number of online cpus in the package
1093 * and set refcount for all of them.
1094 */
1095 ncpus = cpumask_weight(topology_core_cpumask(cpu));
1096 }
1097
1098 pkg = topology_logical_package_id(cpu);
1099 for (; *types; types++) {
1100 type = *types;
1101 pmu = type->pmus;
1102 for (i = 0; i < type->num_boxes; i++, pmu++) {
1103 box = pmu->boxes[pkg];
1104 if (!box)
1105 continue;
1106 /* The first cpu on a package activates the box */
1107 if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
1108 uncore_box_init(box);
1109 }
1110 }
1111
1112 return 0;
1113}
1114
1115static int uncore_cpu_prepare(unsigned int cpu)
1116{
1117 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1118 struct intel_uncore_pmu *pmu;
1119 struct intel_uncore_box *box;
1120 int i, pkg;
1121
1122 pkg = topology_logical_package_id(cpu);
1123 for (; *types; types++) {
1124 type = *types;
1125 pmu = type->pmus;
1126 for (i = 0; i < type->num_boxes; i++, pmu++) {
1127 if (pmu->boxes[pkg])
1128 continue;
1129 /* First cpu of a package allocates the box */
1130 box = uncore_alloc_box(type, cpu_to_node(cpu));
1131 if (!box)
1132 return -ENOMEM;
1133 box->pmu = pmu;
1134 box->pkgid = pkg;
1135 pmu->boxes[pkg] = box;
1136 }
1137 }
1138 return 0;
1139}
1140
1141static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, 1043static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
1142 int new_cpu) 1044 int new_cpu)
1143{ 1045{
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
1177 1079
1178static int uncore_event_cpu_offline(unsigned int cpu) 1080static int uncore_event_cpu_offline(unsigned int cpu)
1179{ 1081{
1180 int target; 1082 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1083 struct intel_uncore_pmu *pmu;
1084 struct intel_uncore_box *box;
1085 int i, pkg, target;
1181 1086
1182 /* Check if exiting cpu is used for collecting uncore events */ 1087 /* Check if exiting cpu is used for collecting uncore events */
1183 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) 1088 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1184 return 0; 1089 goto unref;
1185
1186 /* Find a new cpu to collect uncore events */ 1090 /* Find a new cpu to collect uncore events */
1187 target = cpumask_any_but(topology_core_cpumask(cpu), cpu); 1091 target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
1188 1092
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
1194 1098
1195 uncore_change_context(uncore_msr_uncores, cpu, target); 1099 uncore_change_context(uncore_msr_uncores, cpu, target);
1196 uncore_change_context(uncore_pci_uncores, cpu, target); 1100 uncore_change_context(uncore_pci_uncores, cpu, target);
1101
1102unref:
1103 /* Clear the references */
1104 pkg = topology_logical_package_id(cpu);
1105 for (; *types; types++) {
1106 type = *types;
1107 pmu = type->pmus;
1108 for (i = 0; i < type->num_boxes; i++, pmu++) {
1109 box = pmu->boxes[pkg];
1110 if (box && atomic_dec_return(&box->refcnt) == 0)
1111 uncore_box_exit(box);
1112 }
1113 }
1197 return 0; 1114 return 0;
1198} 1115}
1199 1116
1117static int allocate_boxes(struct intel_uncore_type **types,
1118 unsigned int pkg, unsigned int cpu)
1119{
1120 struct intel_uncore_box *box, *tmp;
1121 struct intel_uncore_type *type;
1122 struct intel_uncore_pmu *pmu;
1123 LIST_HEAD(allocated);
1124 int i;
1125
1126 /* Try to allocate all required boxes */
1127 for (; *types; types++) {
1128 type = *types;
1129 pmu = type->pmus;
1130 for (i = 0; i < type->num_boxes; i++, pmu++) {
1131 if (pmu->boxes[pkg])
1132 continue;
1133 box = uncore_alloc_box(type, cpu_to_node(cpu));
1134 if (!box)
1135 goto cleanup;
1136 box->pmu = pmu;
1137 box->pkgid = pkg;
1138 list_add(&box->active_list, &allocated);
1139 }
1140 }
1141 /* Install them in the pmus */
1142 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1143 list_del_init(&box->active_list);
1144 box->pmu->boxes[pkg] = box;
1145 }
1146 return 0;
1147
1148cleanup:
1149 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1150 list_del_init(&box->active_list);
1151 kfree(box);
1152 }
1153 return -ENOMEM;
1154}
1155
1200static int uncore_event_cpu_online(unsigned int cpu) 1156static int uncore_event_cpu_online(unsigned int cpu)
1201{ 1157{
1202 int target; 1158 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1159 struct intel_uncore_pmu *pmu;
1160 struct intel_uncore_box *box;
1161 int i, ret, pkg, target;
1162
1163 pkg = topology_logical_package_id(cpu);
1164 ret = allocate_boxes(types, pkg, cpu);
1165 if (ret)
1166 return ret;
1167
1168 for (; *types; types++) {
1169 type = *types;
1170 pmu = type->pmus;
1171 for (i = 0; i < type->num_boxes; i++, pmu++) {
1172 box = pmu->boxes[pkg];
 1173 if (box && atomic_inc_return(&box->refcnt) == 1)
1174 uncore_box_init(box);
1175 }
1176 }
1203 1177
1204 /* 1178 /*
1205 * Check if there is an online cpu in the package 1179 * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
1389 if (cret && pret) 1363 if (cret && pret)
1390 return -ENODEV; 1364 return -ENODEV;
1391 1365
1392 /* 1366 /* Install hotplug callbacks to setup the targets for each package */
1393 * Install callbacks. Core will call them for each online cpu. 1367 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1394 * 1368 "perf/x86/intel/uncore:online",
1395 * The first online cpu of each package allocates and takes 1369 uncore_event_cpu_online,
1396 * the refcounts for all other online cpus in that package. 1370 uncore_event_cpu_offline);
1397 * If msrs are not enabled no allocation is required and 1371 if (ret)
1398 * uncore_cpu_prepare() is not called for each online cpu. 1372 goto err;
1399 */
1400 if (!cret) {
1401 ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
1402 "perf/x86/intel/uncore:prepare",
1403 uncore_cpu_prepare, NULL);
1404 if (ret)
1405 goto err;
1406 } else {
1407 cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
1408 "perf/x86/intel/uncore:prepare",
1409 uncore_cpu_prepare, NULL);
1410 }
1411 first_init = 1;
1412 cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
1413 "perf/x86/uncore:starting",
1414 uncore_cpu_starting, uncore_cpu_dying);
1415 first_init = 0;
1416 cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1417 "perf/x86/uncore:online",
1418 uncore_event_cpu_online, uncore_event_cpu_offline);
1419 return 0; 1373 return 0;
1420 1374
1421err: 1375err:
1422 /* Undo box->init_box() */
1423 on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
1424 uncore_types_exit(uncore_msr_uncores); 1376 uncore_types_exit(uncore_msr_uncores);
1425 uncore_pci_exit(); 1377 uncore_pci_exit();
1426 return ret; 1378 return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
1429 1381
1430static void __exit intel_uncore_exit(void) 1382static void __exit intel_uncore_exit(void)
1431{ 1383{
1432 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE); 1384 cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
1433 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
1434 cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
1435 uncore_types_exit(uncore_msr_uncores); 1385 uncore_types_exit(uncore_msr_uncores);
1436 uncore_pci_exit(); 1386 uncore_pci_exit();
1437} 1387}
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 38711df3bcb5..2266f864b747 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
140extern void load_ucode_ap(void); 140extern void load_ucode_ap(void);
141void reload_early_microcode(void); 141void reload_early_microcode(void);
142extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); 142extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
143extern bool initrd_gone;
143#else 144#else
144static inline int __init microcode_init(void) { return 0; }; 145static inline int __init microcode_init(void) { return 0; };
145static inline void __init load_ucode_bsp(void) { } 146static inline void __init load_ucode_bsp(void) { }
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 1e35dd06b090..52f352b063fd 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2117,6 +2117,7 @@ static inline void __init check_timer(void)
2117 if (idx != -1 && irq_trigger(idx)) 2117 if (idx != -1 && irq_trigger(idx))
2118 unmask_ioapic_irq(irq_get_chip_data(0)); 2118 unmask_ioapic_irq(irq_get_chip_data(0));
2119 } 2119 }
2120 irq_domain_deactivate_irq(irq_data);
2120 irq_domain_activate_irq(irq_data); 2121 irq_domain_activate_irq(irq_data);
2121 if (timer_irq_works()) { 2122 if (timer_irq_works()) {
2122 if (disable_timer_pin_1 > 0) 2123 if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@ static inline void __init check_timer(void)
2138 * legacy devices should be connected to IO APIC #0 2139 * legacy devices should be connected to IO APIC #0
2139 */ 2140 */
2140 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2); 2141 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
2142 irq_domain_deactivate_irq(irq_data);
2141 irq_domain_activate_irq(irq_data); 2143 irq_domain_activate_irq(irq_data);
2142 legacy_pic->unmask(0); 2144 legacy_pic->unmask(0);
2143 if (timer_irq_works()) { 2145 if (timer_irq_works()) {
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 00ef43233e03..537c6647d84c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
1373 1373
1374static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; 1374static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1375 1375
1376static void __restart_timer(struct timer_list *t, unsigned long interval) 1376static void __start_timer(struct timer_list *t, unsigned long interval)
1377{ 1377{
1378 unsigned long when = jiffies + interval; 1378 unsigned long when = jiffies + interval;
1379 unsigned long flags; 1379 unsigned long flags;
1380 1380
1381 local_irq_save(flags); 1381 local_irq_save(flags);
1382 1382
1383 if (timer_pending(t)) { 1383 if (!timer_pending(t) || time_before(when, t->expires))
1384 if (time_before(when, t->expires)) 1384 mod_timer(t, round_jiffies(when));
1385 mod_timer(t, when);
1386 } else {
1387 t->expires = round_jiffies(when);
1388 add_timer_on(t, smp_processor_id());
1389 }
1390 1385
1391 local_irq_restore(flags); 1386 local_irq_restore(flags);
1392} 1387}
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
1421 1416
1422done: 1417done:
1423 __this_cpu_write(mce_next_interval, iv); 1418 __this_cpu_write(mce_next_interval, iv);
1424 __restart_timer(t, iv); 1419 __start_timer(t, iv);
1425} 1420}
1426 1421
1427/* 1422/*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
1432 struct timer_list *t = this_cpu_ptr(&mce_timer); 1427 struct timer_list *t = this_cpu_ptr(&mce_timer);
1433 unsigned long iv = __this_cpu_read(mce_next_interval); 1428 unsigned long iv = __this_cpu_read(mce_next_interval);
1434 1429
1435 __restart_timer(t, interval); 1430 __start_timer(t, interval);
1436 1431
1437 if (interval < iv) 1432 if (interval < iv)
1438 __this_cpu_write(mce_next_interval, interval); 1433 __this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1779 } 1774 }
1780} 1775}
1781 1776
1782static void mce_start_timer(unsigned int cpu, struct timer_list *t) 1777static void mce_start_timer(struct timer_list *t)
1783{ 1778{
1784 unsigned long iv = check_interval * HZ; 1779 unsigned long iv = check_interval * HZ;
1785 1780
1786 if (mca_cfg.ignore_ce || !iv) 1781 if (mca_cfg.ignore_ce || !iv)
1787 return; 1782 return;
1788 1783
1789 per_cpu(mce_next_interval, cpu) = iv; 1784 this_cpu_write(mce_next_interval, iv);
1790 1785 __start_timer(t, iv);
1791 t->expires = round_jiffies(jiffies + iv);
1792 add_timer_on(t, cpu);
1793} 1786}
1794 1787
1795static void __mcheck_cpu_setup_timer(void) 1788static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
1806 unsigned int cpu = smp_processor_id(); 1799 unsigned int cpu = smp_processor_id();
1807 1800
1808 setup_pinned_timer(t, mce_timer_fn, cpu); 1801 setup_pinned_timer(t, mce_timer_fn, cpu);
1809 mce_start_timer(cpu, t); 1802 mce_start_timer(t);
1810} 1803}
1811 1804
1812/* Handle unconfigured int18 (should never happen) */ 1805/* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
2566 2559
2567static int mce_cpu_online(unsigned int cpu) 2560static int mce_cpu_online(unsigned int cpu)
2568{ 2561{
2569 struct timer_list *t = &per_cpu(mce_timer, cpu); 2562 struct timer_list *t = this_cpu_ptr(&mce_timer);
2570 int ret; 2563 int ret;
2571 2564
2572 mce_device_create(cpu); 2565 mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
2577 return ret; 2570 return ret;
2578 } 2571 }
2579 mce_reenable_cpu(); 2572 mce_reenable_cpu();
2580 mce_start_timer(cpu, t); 2573 mce_start_timer(t);
2581 return 0; 2574 return 0;
2582} 2575}
2583 2576
2584static int mce_cpu_pre_down(unsigned int cpu) 2577static int mce_cpu_pre_down(unsigned int cpu)
2585{ 2578{
2586 struct timer_list *t = &per_cpu(mce_timer, cpu); 2579 struct timer_list *t = this_cpu_ptr(&mce_timer);
2587 2580
2588 mce_disable_cpu(); 2581 mce_disable_cpu();
2589 del_timer_sync(t); 2582 del_timer_sync(t);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 6a31e2691f3a..079e81733a58 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
384reget: 384reget:
385 if (!get_builtin_microcode(&cp, family)) { 385 if (!get_builtin_microcode(&cp, family)) {
386#ifdef CONFIG_BLK_DEV_INITRD 386#ifdef CONFIG_BLK_DEV_INITRD
387 cp = find_cpio_data(ucode_path, (void *)initrd_start, 387 if (!initrd_gone)
388 initrd_end - initrd_start, NULL); 388 cp = find_cpio_data(ucode_path, (void *)initrd_start,
389 initrd_end - initrd_start, NULL);
389#endif 390#endif
390 if (!(cp.data && cp.size)) { 391 if (!(cp.data && cp.size)) {
391 /* 392 /*
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 2af69d27da62..73102d932760 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -46,6 +46,8 @@
46static struct microcode_ops *microcode_ops; 46static struct microcode_ops *microcode_ops;
47static bool dis_ucode_ldr = true; 47static bool dis_ucode_ldr = true;
48 48
49bool initrd_gone;
50
49LIST_HEAD(microcode_cache); 51LIST_HEAD(microcode_cache);
50 52
51/* 53/*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
190static int __init save_microcode_in_initrd(void) 192static int __init save_microcode_in_initrd(void)
191{ 193{
192 struct cpuinfo_x86 *c = &boot_cpu_data; 194 struct cpuinfo_x86 *c = &boot_cpu_data;
195 int ret = -EINVAL;
193 196
194 switch (c->x86_vendor) { 197 switch (c->x86_vendor) {
195 case X86_VENDOR_INTEL: 198 case X86_VENDOR_INTEL:
196 if (c->x86 >= 6) 199 if (c->x86 >= 6)
197 return save_microcode_in_initrd_intel(); 200 ret = save_microcode_in_initrd_intel();
198 break; 201 break;
199 case X86_VENDOR_AMD: 202 case X86_VENDOR_AMD:
200 if (c->x86 >= 0x10) 203 if (c->x86 >= 0x10)
201 return save_microcode_in_initrd_amd(c->x86); 204 ret = save_microcode_in_initrd_amd(c->x86);
202 break; 205 break;
203 default: 206 default:
204 break; 207 break;
205 } 208 }
206 209
207 return -EINVAL; 210 initrd_gone = true;
211
212 return ret;
208} 213}
209 214
210struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) 215struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
247 * has the virtual address of the beginning of the initrd. It also 252 * has the virtual address of the beginning of the initrd. It also
248 * possibly relocates the ramdisk. In either case, initrd_start contains 253 * possibly relocates the ramdisk. In either case, initrd_start contains
249 * the updated address so use that instead. 254 * the updated address so use that instead.
255 *
256 * initrd_gone is for the hotplug case where we've thrown out initrd
257 * already.
250 */ 258 */
251 if (!use_pa && initrd_start) 259 if (!use_pa) {
252 start = initrd_start; 260 if (initrd_gone)
261 return (struct cpio_data){ NULL, 0, "" };
262 if (initrd_start)
263 start = initrd_start;
264 }
253 265
254 return find_cpio_data(path, (void *)start, size, NULL); 266 return find_cpio_data(path, (void *)start, size, NULL);
255#else /* !CONFIG_BLK_DEV_INITRD */ 267#else /* !CONFIG_BLK_DEV_INITRD */
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 3f329b74e040..8325d8a09ab0 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -41,7 +41,7 @@
 
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
 	struct ucode_cpu_info uci;
 	struct cpio_data cp;
 
-	/*
-	 * AP loading didn't find any microcode patch, no need to save anything.
-	 */
-	if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-		return 0;
-
 	if (!load_builtin_intel_microcode(&cp))
 		cp = find_microcode_in_initrd(ucode_path, false);
 
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
 	return 0;
 }
 
-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index e4e97a5355ce..de7234401275 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -9,6 +9,7 @@
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
 	 * it will #GP. Make sure it is replaced after the memset().
 	 */
 	if (static_cpu_has(X86_FEATURE_XSAVES))
-		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+					       xfeatures_mask;
 
 	if (static_cpu_has(X86_FEATURE_FXSR))
 		fpstate_init_fxstate(&state->fxsave);
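For context on the fpu/core.c hunk: with XSAVES, XCOMP_BV must carry bit 63 (the compacted-format marker) plus one bit per state component actually laid out in the compacted buffer; a marker with no component bits makes a later XRSTORS fault. A hedged sketch of the composition, where xfeatures_mask stands in for the kernel's enabled-component mask:

    #include <stdint.h>

    #define XCOMP_BV_COMPACTED_FORMAT	(1ULL << 63)

    /* assumption: mask of xstate components the OS has enabled */
    static uint64_t xfeatures_mask = 0x7;	/* e.g. FP | SSE | AVX */

    static uint64_t make_xcomp_bv(void)
    {
    	/*
    	 * The compacted marker alone is not enough: the component
    	 * bits must describe the buffer layout, or XRSTORS #GPs.
    	 */
    	return XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;
    }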
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 85e87b46c318..dc6ba5bda9fc 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
 	} else {
 		struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 
+		irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
 		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
 		disable_irq(hdev->irq);
 		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
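The hpet.c one-liner deserves a note: across suspend the irqdomain can still believe the interrupt is active, so a plain activate call may short-circuit and never reprogram the hardware; deactivating first forces the reprogramming. A toy model of that idempotence trap:

    #include <stdbool.h>
    #include <stdio.h>

    static bool domain_active;	/* the domain's cached notion of IRQ state */

    static void activate(void)
    {
    	if (domain_active)	/* already active: early return, no reprogram */
    		return;
    	domain_active = true;
    	printf("programmed interrupt routing\n");
    }

    static void deactivate(void)
    {
    	domain_active = false;
    }

    int main(void)
    {
    	activate();	/* boot: programs the hardware */
    	/* ... suspend wipes the hardware but not domain_active ... */
    	deactivate();	/* the fix: invalidate the cached state */
    	activate();	/* resume: actually reprograms */
    	return 0;
    }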
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d153be8929a6..e52c9088660f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 	memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 
 	/* Set XSTATE_BV */
+	xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
 	*(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 
 	/*
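The kvm/x86.c line is a sanitisation step: the XSTATE_BV handed back to userspace must never advertise components outside what the guest's XCR0 may enable, with the FP/SSE bits always architecturally permitted. Sketched with hypothetical values:

    #include <stdint.h>

    #define XFEATURE_MASK_FP	(1ULL << 0)
    #define XFEATURE_MASK_SSE	(1ULL << 1)
    #define XFEATURE_MASK_FPSSE	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE)

    static uint64_t sanitize_xstate_bv(uint64_t xstate_bv,
    				   uint64_t guest_supported_xcr0)
    {
    	/* drop any component the guest cannot enable via XCR0 */
    	return xstate_bv & (guest_supported_xcr0 | XFEATURE_MASK_FPSSE);
    }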
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 319148bd4b05..2f25a363068c 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 	efi_scratch.use_pgd = true;
 
 	/*
+	 * Certain firmware versions are way too sentimental and still believe
+	 * they are exclusive and unquestionable owners of the first physical page,
+	 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+	 * (but then write-access it later during SetVirtualAddressMap()).
+	 *
+	 * Create a 1:1 mapping for this page, to avoid triple faults during early
+	 * boot with such firmware. We are free to hand this page to the BIOS,
+	 * as trim_bios_range() will reserve the first page and isolate it away
+	 * from memory allocators anyway.
+	 */
+	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+		pr_err("Failed to create 1:1 mapping for the first page!\n");
+		return 1;
+	}
+
+	/*
 	 * When making calls to the firmware everything needs to be 1:1
 	 * mapped and addressable with 32-bit pointers. Map the kernel
 	 * text and allocate a new stack because we can't rely on the
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 848e8568fb3c..8fd4be610607 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
 
 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
 	local_irq_disable();
 	/*
 	 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
diff --git a/crypto/algapi.c b/crypto/algapi.c
index df939b54b09f..1fad2a6b3bbb 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
 	struct crypto_larval *larval;
 	int err;
 
+	alg->cra_flags &= ~CRYPTO_ALG_DEAD;
 	err = crypto_check_alg(alg);
 	if (err)
 		return err;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index f849311e9fd4..533265f110e0 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 unlock:
 	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
 		af_alg_free_sg(&rsgl->sgl);
+		list_del(&rsgl->list);
 		if (rsgl != &ctx->first_rsgl)
 			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-		list_del(&rsgl->list);
 	}
 	INIT_LIST_HEAD(&ctx->list);
 	aead_wmem_wakeup(sk);
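The algif_aead reordering is the classic unlink-before-free rule: once sock_kfree_s() has run, rsgl->list is freed memory and a later list_del() on it is a use-after-free (the embedded first_rsgl only masked the bug for the first entry). The safe shape, reduced to a plain C list:

    #include <stdlib.h>

    struct node {
    	struct node *next;
    };

    /* Unlink first, free second: never touch a node's links after free(). */
    static void destroy_all(struct node **head)
    {
    	struct node *n, *next;

    	for (n = *head; n; n = next) {
    		next = n->next;	/* save the link before freeing */
    		n->next = NULL;	/* the list_del() step */
    		free(n);
    	}
    	*head = NULL;
    }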
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 82b0b5710979..b0399e8f6d27 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 
 	ACPI_FUNCTION_TRACE(tb_install_and_load_table);
 
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
 	/* Install the table and load it into the namespace */
 
 	status = acpi_tb_install_standard_table(address, flags, TRUE,
 						override, &i);
 	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
+		goto exit;
 	}
 
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	status = acpi_tb_load_table(i, acpi_gbl_root_node);
-	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
-unlock_and_exit:
+exit:
 	*table_index = i;
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 5fdf251a9f97..01e1b3d63fc0 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 		goto release_and_exit;
 	}
 
+	/* Acquire the table lock */
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
 	if (reload) {
 		/*
 		 * Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 			    new_table_desc.signature.integer));
 
 			status = AE_BAD_SIGNATURE;
-			goto release_and_exit;
+			goto unlock_and_exit;
 		}
 
 		/* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 				/* Table is still loaded, this is an error */
 
 				status = AE_ALREADY_EXISTS;
-				goto release_and_exit;
+				goto unlock_and_exit;
 			} else {
 				/*
 				 * Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 				 * indicate the re-installation.
 				 */
 				acpi_tb_uninstall_table(&new_table_desc);
+				(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 				*table_index = i;
 				return_ACPI_STATUS(AE_OK);
 			}
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
 	/* Invoke table handler if present */
 
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	if (acpi_gbl_table_handler) {
 		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
 					     new_table_desc.pointer,
 					     acpi_gbl_table_handler_context);
 	}
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+	/* Release the table lock */
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 
 release_and_exit:
 
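Taken together, the tbdata.c and tbinstal.c hunks move ACPI_MTX_TABLES acquisition into acpi_tb_install_standard_table() and, notably, drop it around the user-supplied table handler, so an external callback can never deadlock against the table mutex. The shape of that pattern, sketched with pthreads and hypothetical names:

    #include <pthread.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static void (*event_handler)(void *ctx);
    static void *handler_ctx;

    static void install_table(void)
    {
    	pthread_mutex_lock(&table_lock);
    	/* ... mutate the table list under the lock ... */

    	/*
    	 * Never call foreign code while holding our lock: the
    	 * handler may block or try to take table_lock itself.
    	 */
    	pthread_mutex_unlock(&table_lock);
    	if (event_handler)
    		event_handler(handler_ctx);
    	pthread_mutex_lock(&table_lock);

    	/* ... finish up under the lock ... */
    	pthread_mutex_unlock(&table_lock);
    }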
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9b6cebe227a0..54abb26b7366 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
 		if (acpi_sleep_state_supported(i))
 			sleep_states[i] = 1;
 
-	/*
-	 * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
-	 * the default suspend mode was not selected from the command line.
-	 */
-	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
-	    mem_sleep_default > PM_SUSPEND_MEM)
-		mem_sleep_default = PM_SUSPEND_FREEZE;
-
 	suspend_set_ops(old_suspend_ordering ?
 			&acpi_suspend_ops_old : &acpi_suspend_ops);
 	freeze_set_ops(&acpi_freeze_ops);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 02ded25c82e4..7f48156cbc0c 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
 		},
 	},
-	{
-	/* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
-	/* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
-	.callback = video_detect_force_native,
-	.ident = "HP Pavilion dv6",
-	.matches = {
-		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
-		},
-	},
-
 	{ },
 };
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9cd0a2d41816..c2d3785ec227 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 
 		if (qc->err_mask & ~AC_ERR_OTHER)
 			qc->err_mask &= ~AC_ERR_OTHER;
+	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+		qc->result_tf.command |= ATA_SENSE;
 	}
 
 	/* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4356 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, 4358 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4357 4359
4358 /* 4360 /*
4359 * Device times out with higher max sects. 4361 * These devices time out with higher max sects.
4360 * https://bugzilla.kernel.org/show_bug.cgi?id=121671 4362 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4361 */ 4363 */
4362 { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, 4364 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4363 4365
4364 /* Devices we expect to fail diagnostics */ 4366 /* Devices we expect to fail diagnostics */
4365 4367
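Widening the blacklist entry to "LITEON CX1-JB*-HP" works because libata compares model strings with glob-style matching, so every capacity variant of the family inherits the same horkage. A userspace illustration with fnmatch(3), whose semantics are close (though not identical) to the kernel's glob_match():

    #include <fnmatch.h>
    #include <stdio.h>

    int main(void)
    {
    	const char *pattern = "LITEON CX1-JB*-HP";

    	/* 0 == match: both capacity variants hit the one entry */
    	printf("%d\n", fnmatch(pattern, "LITEON CX1-JB256-HP", 0));
    	printf("%d\n", fnmatch(pattern, "LITEON CX1-JB512-HP", 0));
    	return 0;
    }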
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 823e938c9a78..2f32782cea6d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
 		host->iomap = NULL;
 		hpriv->base = devm_ioremap(&pdev->dev, res->start,
 					   resource_size(res));
+		if (!hpriv->base)
+			return -ENOMEM;
+
 		hpriv->base -= SATAHC0_REG_BASE;
 
 		hpriv->clk = clk_get(&pdev->dev, NULL);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4497d263209f..ac350c518e0c 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 	struct firmware_buf *buf = fw_priv->buf;
 
 	__fw_load_abort(buf);
-
-	/* avoid user action after loading abort */
-	fw_priv->buf = NULL;
 }
 
 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 
 	mutex_lock(&fw_lock);
 	fw_buf = fw_priv->buf;
-	if (!fw_buf)
+	if (fw_state_is_aborted(&fw_buf->fw_st))
 		goto out;
 
 	switch (loading) {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ab8ea1253e6..fa26ffd25fa6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
 	struct memory_block *mem = to_memory_block(dev);
 	unsigned long start_pfn, end_pfn;
+	unsigned long valid_start, valid_end, valid_pages;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-	struct page *first_page;
 	struct zone *zone;
 	int zone_shift = 0;
 
 	start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	end_pfn = start_pfn + nr_pages;
-	first_page = pfn_to_page(start_pfn);
 
 	/* The block contains more than one zone can not be offlined. */
-	if (!test_pages_in_a_zone(start_pfn, end_pfn))
+	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
 		return sprintf(buf, "none\n");
 
-	zone = page_zone(first_page);
+	zone = page_zone(pfn_to_page(valid_start));
+	valid_pages = valid_end - valid_start;
 
 	/* MMOP_ONLINE_KEEP */
 	sprintf(buf, "%s", zone->name);
 
 	/* MMOP_ONLINE_KERNEL */
-	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+	zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
 	}
 
 	/* MMOP_ONLINE_MOVABLE */
-	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+	zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 872eac4cb1df..a14fac6a01d3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_idle(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
 	if (rpmflags & RPM_GET_PUT) {
 		if (!atomic_dec_and_test(&dev->power.usage_count))
 			return 0;
 	}
 
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
 	spin_lock_irqsave(&dev->power.lock, flags);
 	retval = rpm_suspend(dev, rpmflags);
 	spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
 	unsigned long flags;
 	int retval;
 
-	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+			dev->power.runtime_status != RPM_ACTIVE);
 
 	if (rpmflags & RPM_GET_PUT)
 		atomic_inc(&dev->power.usage_count);
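The reordering in __pm_runtime_idle()/__pm_runtime_suspend() matters because the RPM_GET_PUT fast path, a usage-count decrement that does not reach zero, returns immediately and never sleeps, so it is legal from atomic context; only the slow path can sleep and deserves the assertion. Condensed into a standalone sketch:

    #include <stdatomic.h>

    static atomic_int usage_count = 1;

    /* assumption: stands in for rpm_idle(), which may sleep */
    static int do_slow_idle(void)
    {
    	return 0;
    }

    static int put_and_maybe_idle(void)
    {
    	/*
    	 * Fast path: dropping a reference that does not hit zero
    	 * never sleeps, so it must come before any
    	 * might_sleep_if()-style assertion.
    	 */
    	if (atomic_fetch_sub(&usage_count, 1) != 1)
    		return 0;

    	/* only from here on may the call actually sleep */
    	return do_slow_idle();
    }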
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index f642c4264c27..168fa175d65a 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b4f6520e74f0..62f5bfa5065d 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
 					 u32 mask, u32 value)
 {
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
 	if (cc->capabilities & BCMA_CC_CAP_PMU)
 		bcma_pmu_early_init(cc);
 
-	if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-		bcma_chipco_serial_init(cc);
-
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		bcma_core_chipcommon_flash_detect(cc);
 
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
 	return res;
 }
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
 	unsigned int irq;
 	u32 baud_base;
 	u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 		ports[i].baud_base = baud_base;
 		ports[i].reg_shift = 0;
 	}
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 96f171328200..89af807cf29c 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+	struct bcma_bus *bus = mcore->core->bus;
+
 	if (mcore->early_setup_done)
 		return;
 
+	bcma_chipco_serial_init(&bus->drv_cc);
 	bcma_core_mips_nvram_init(mcore);
 
 	mcore->early_setup_done = true;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b2bdfa81f929..265f1a7072e9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -197,13 +197,13 @@ struct blkfront_info
 	/* Number of pages per ring buffer. */
 	unsigned int nr_ring_pages;
 	struct request_queue *rq;
-	unsigned int feature_flush;
-	unsigned int feature_fua;
+	unsigned int feature_flush:1;
+	unsigned int feature_fua:1;
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
+	unsigned int feature_persistent:1;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
-	unsigned int feature_persistent:1;
 	/* Number of 4KB segments handled */
 	unsigned int max_indirect_segments;
 	int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 	}
 	else
 		grants = info->max_indirect_segments;
-	psegs = grants / GRANTS_PER_PSEG;
+	psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
 
 	err = fill_grant_buffer(rinfo,
 				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 		blkfront_setup_discard(info);
 
 	info->feature_persistent =
-		xenbus_read_unsigned(info->xbdev->otherend,
-				     "feature-persistent", 0);
+		!!xenbus_read_unsigned(info->xbdev->otherend,
+				       "feature-persistent", 0);
 
 	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
 					"feature-max-indirect-segments", 0);
-	info->max_indirect_segments = min(indirect_segments,
-					  xen_blkif_max_segments);
+	if (indirect_segments > xen_blkif_max_segments)
+		indirect_segments = xen_blkif_max_segments;
+	if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		indirect_segments = 0;
+	info->max_indirect_segments = indirect_segments;
 }
 
 /*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
 	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
 		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
 			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
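The feature negotiation above replaces min() with an explicit clamp: a backend advertising fewer indirect segments than the ring's native BLKIF_MAX_SEGMENTS_PER_REQUEST now disables indirect descriptors entirely instead of paying their overhead for no gain, and the module parameter is floored at the native value. The rule in isolation (constants are illustrative):

    #include <stdio.h>

    #define NATIVE_SEGS 11			/* BLKIF_MAX_SEGMENTS_PER_REQUEST */
    static unsigned int user_cap = 32;	/* xen_blkif_max_segments analogue */

    static unsigned int clamp_indirect(unsigned int advertised)
    {
    	if (advertised > user_cap)
    		advertised = user_cap;
    	if (advertised <= NATIVE_SEGS)
    		advertised = 0;	/* not worth it; use the plain ring */
    	return advertised;
    }

    int main(void)
    {
    	printf("%u %u\n", clamp_indirect(8), clamp_indirect(256));	/* 0 32 */
    	return 0;
    }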
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 4fda623e55bb..c94360671f41 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy,
 static int brcm_avs_suspend(struct cpufreq_policy *policy)
 {
 	struct private_data *priv = policy->driver_data;
+	int ret;
+
+	ret = brcm_avs_get_pmap(priv, &priv->pmap);
+	if (ret)
+		return ret;
 
-	return brcm_avs_get_pmap(priv, &priv->pmap);
+	/*
+	 * We can't use the P-state returned by brcm_avs_get_pmap(), since
+	 * that's the initial P-state from when the P-map was downloaded to the
+	 * AVS co-processor, not necessarily the P-state we are running at now.
+	 * So, we get the current P-state explicitly.
+	 */
+	return brcm_avs_get_pstate(priv, &priv->pmap.state);
 }
 
 static int brcm_avs_resume(struct cpufreq_policy *policy)
@@ -954,9 +965,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
 	brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
 	brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
 
-	return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+	return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
 		pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
-		mdiv_p3, mdiv_p4);
+		mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
 }
 
 static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f91c25718d16..50bd6d987fc3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1235,6 +1235,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
 
+#define MSR_IA32_POWER_CTL_BIT_EE	19
+
+/* Disable energy efficiency optimization */
+static void intel_pstate_disable_ee(int cpu)
+{
+	u64 power_ctl;
+	int ret;
+
+	ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
+	if (ret)
+		return;
+
+	if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
+		pr_info("Disabling energy efficiency optimization\n");
+		power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+		wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
+	}
+}
+
 static int atom_get_min_pstate(void)
 {
 	u64 value;
@@ -1845,6 +1864,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	{}
 };
 
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+	{}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
 	struct cpudata *cpu;
@@ -1875,6 +1899,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu->cpu = cpunum;
 
 	if (hwp_active) {
+		const struct x86_cpu_id *id;
+
+		id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+		if (id)
+			intel_pstate_disable_ee(cpunum);
+
 		intel_pstate_hwp_enable(cpu);
 		pid_params.sample_rate_ms = 50;
 		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
@@ -2005,7 +2035,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		limits = &performance_limits;
 		perf_limits = limits;
 	}
-	if (policy->max >= policy->cpuinfo.max_freq) {
+	if (policy->max >= policy->cpuinfo.max_freq &&
+	    !limits->no_turbo) {
 		pr_debug("set performance\n");
 		intel_pstate_set_performance_limits(perf_limits);
 		goto out;
@@ -2047,6 +2078,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
 		return -EINVAL;
 
+	/* When per-CPU limits are used, sysfs limits are not used */
+	if (!per_cpu_limits) {
+		unsigned int max_freq, min_freq;
+
+		max_freq = policy->cpuinfo.max_freq *
+					limits->max_sysfs_pct / 100;
+		min_freq = policy->cpuinfo.max_freq *
+					limits->min_sysfs_pct / 100;
+		cpufreq_verify_within_limits(policy, min_freq, max_freq);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index e2ce8190ecc9..612898b4aaad 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
 static void ccp5_config(struct ccp_device *ccp)
 {
 	/* Public side */
-	iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+	iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
 }
 
 static void ccp5other_config(struct ccp_device *ccp)
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 830f35e6005f..649e5610a5ce 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -238,6 +238,7 @@ struct ccp_dma_chan {
 	struct ccp_device *ccp;
 
 	spinlock_t lock;
+	struct list_head created;
 	struct list_head pending;
 	struct list_head active;
 	struct list_head complete;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 6553912804f7..e5d9278f4019 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
 	ccp_free_desc_resources(chan->ccp, &chan->complete);
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
 	spin_lock_irqsave(&chan->lock, flags);
 
 	cookie = dma_cookie_assign(tx_desc);
+	list_del(&desc->entry);
 	list_add_tail(&desc->entry, &chan->pending);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
 
 	spin_lock_irqsave(&chan->lock, sflags);
 
-	list_add_tail(&desc->entry, &chan->pending);
+	list_add_tail(&desc->entry, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, sflags);
 
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
 	/*TODO: Purge the complete list? */
 	ccp_free_desc_resources(chan->ccp, &chan->active);
 	ccp_free_desc_resources(chan->ccp, &chan->pending);
+	ccp_free_desc_resources(chan->ccp, &chan->created);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 		chan->ccp = ccp;
 
 		spin_lock_init(&chan->lock);
+		INIT_LIST_HEAD(&chan->created);
 		INIT_LIST_HEAD(&chan->pending);
 		INIT_LIST_HEAD(&chan->active);
 		INIT_LIST_HEAD(&chan->complete);
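The new 'created' list gives CCP descriptors an explicit pre-submission state: ccp_create_desc() used to park descriptors straight on 'pending', where a terminate or free could treat never-submitted work as queued. Now tx_submit moves them over, and every teardown path sweeps 'created' too. The lifecycle, roughly:

    #include <assert.h>

    enum desc_state { DESC_CREATED, DESC_PENDING, DESC_ACTIVE, DESC_COMPLETE };

    struct desc {
    	enum desc_state state;
    };

    /* ccp_create_desc(): exists, but is not yet queued for the engine */
    static void create_desc(struct desc *d)
    {
    	d->state = DESC_CREATED;
    }

    /* ccp_tx_submit(): the list_del() + list_add_tail() pair in the patch */
    static void submit_desc(struct desc *d)
    {
    	assert(d->state == DESC_CREATED);
    	d->state = DESC_PENDING;
    }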
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 2ed1e24b44a8..b4b78b37f8a6 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 	case CRYPTO_ALG_TYPE_AEAD:
 		ctx_req.req.aead_req = (struct aead_request *)req;
 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
 		if (ctx_req.ctx.reqctx->skb) {
 			kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
 	unsigned int kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
+
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
 		null = 1;
 		assoclen = 0;
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;
 
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
 	unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
 	sub_type = get_aead_subtype(tfm);
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
+
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err) {
 			pr_err("AAD copy to destination buffer fails\n");
 			return ERR_PTR(err);
 		}
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;
 
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct phys_sge_parm sg_param;
-	struct scatterlist *src, *dst;
-	struct scatterlist src_sg[2], dst_sg[2];
+	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = AES_BLOCK_SIZE;
 	unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
 		goto err;
 
-	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-	dst = src;
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	reqctx->dst = src;
 	if (req->src != req->dst) {
 		err = chcr_copy_assoc(req, aeadctx);
 		if (err)
 			return ERR_PTR(err);
-		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+					       req->assoclen);
 	}
 
 	if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		crypt_len = AES_BLOCK_SIZE;
 	else
 		crypt_len = req->cryptlen;
-	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents <= 0) {
 		pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
 	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
 				 &sg_param))
 		goto dstmap_fail;
 
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		write_sg_to_skb(skb, &frags, src, req->cryptlen);
 	} else {
 		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
-		write_sg_to_skb(skb, &frags, dst, crypt_len);
+		write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
+
 	}
 
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	unsigned int ck_size;
 	int ret = 0, key_ctx_size = 0;
 
-	if (get_aead_subtype(aead) ==
-	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+	    keylen > 3) {
 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
 		memcpy(aeadctx->salt, key + keylen, 4);
 	}
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 918da8e6e2d8..1c65f07e1cc9 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
 int assign_chcr_device(struct chcr_dev **dev)
 {
 	struct uld_ctx *u_ctx;
+	int ret = -ENXIO;
 
 	/*
 	 * Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
 	 * must go to the same device to maintain the ordering.
 	 */
 	mutex_lock(&dev_mutex); /* TODO ? */
-	u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
-	if (!u_ctx) {
-		mutex_unlock(&dev_mutex);
-		return -ENXIO;
+	list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+		if (u_ctx && u_ctx->dev) {
+			*dev = u_ctx->dev;
+			ret = 0;
+			break;
 	}
-
-	*dev = u_ctx->dev;
 	mutex_unlock(&dev_mutex);
-	return 0;
+	return ret;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 
 static int __init chcr_crypto_init(void)
 {
-	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
 		pr_err("ULD register fail: No chcr crypto support in cxgb4");
-		return -1;
-	}
 
 	return 0;
 }
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d5af7d64a763..7ec0a8f12475 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -158,6 +158,9 @@ struct ablk_ctx {
 };
 struct chcr_aead_reqctx {
 	struct sk_buff	*skb;
+	struct scatterlist *dst;
+	struct scatterlist srcffwd[2];
+	struct scatterlist dstffwd[2];
 	short int dst_nents;
 	u16 verify;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index bc5cbc193aae..5b2d78a5b5aa 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 					    &hw_data->accel_capabilities_mask);
 
 	/* Find and map all the device's BARS */
-	i = 0;
+	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
 	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
 	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
 			 ADF_PCI_MAX_BARS * 2) {
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index e8822536530b..33f0a6251e38 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -69,6 +69,7 @@
 #define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 1e480f140663..8c4fd255a601 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
 	unsigned int csr_val;
 	int times = 30;
 
-	if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+	if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
 		return 0;
 
 	csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
 		(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
 		 LOCAL_TO_XFER_REG_OFFSET);
 	handle->pci_dev = pci_info->pci_dev;
-	if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+	if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
 		sram_bar =
 			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
 		handle->hal_sram_addr_v = sram_bar->virt_addr;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d5ba43a87a68..200828c60db9 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -153,6 +153,8 @@ struct cppi41_dd {
 
 	/* context for suspend/resume */
 	unsigned int dma_tdfdq;
+
+	bool is_suspended;
 };
 
 #define FIST_COMPLETION_QUEUE	93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
 	BUG_ON(desc_num >= ALLOC_DECS_NUM);
 	c = cdd->chan_busy[desc_num];
 	cdd->chan_busy[desc_num] = NULL;
+
+	/* Usecount for chan_busy[], paired with push_desc_queue() */
+	pm_runtime_put(cdd->ddev.dev);
+
 	return c;
 }
 
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
 	while (val) {
 		u32 desc, len;
-		int error;
 
-		error = pm_runtime_get(cdd->ddev.dev);
-		if (error < 0)
-			dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-				__func__, error);
+		/*
+		 * This should never trigger, see the comments in
+		 * push_desc_queue()
+		 */
+		WARN_ON(cdd->is_suspended);
 
 		q_num = __fls(val);
 		val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 			c->residue = pd_trans_len(c->desc->pd6) - len;
 			dma_cookie_complete(&c->txd);
 			dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
-			pm_runtime_mark_last_busy(cdd->ddev.dev);
-			pm_runtime_put_autosuspend(cdd->ddev.dev);
 		}
 	}
 	return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
 	 */
 	__iowmb();
 
+	/*
+	 * DMA transfers can take at least 200ms to complete with USB mass
+	 * storage connected. To prevent autosuspend timeouts, we must use
+	 * pm_runtime_get/put() when chan_busy[] is modified. This will get
+	 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+	 * outcome of the transfer.
+	 */
+	pm_runtime_get(cdd->ddev.dev);
+
 	desc_phys = lower_32_bits(c->desc_phys);
 	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
 	WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
 	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
 }
 
-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * getting called out of order. We have both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
 {
-	struct cppi41_dd *cdd = c->cdd;
-	unsigned long flags;
+	struct cppi41_channel *c, *_c;
 
-	spin_lock_irqsave(&cdd->lock, flags);
-	list_add_tail(&c->node, &cdd->pending);
-	spin_unlock_irqrestore(&cdd->lock, flags);
+	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+		push_desc_queue(c);
+		list_del(&c->node);
+	}
 }
 
 static void cppi41_dma_issue_pending(struct dma_chan *chan)
 {
 	struct cppi41_channel *c = to_cpp41_chan(chan);
 	struct cppi41_dd *cdd = c->cdd;
+	unsigned long flags;
 	int error;
 
 	error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
482 return; 500 return;
483 } 501 }
484 502
485 if (likely(pm_runtime_active(cdd->ddev.dev))) 503 spin_lock_irqsave(&cdd->lock, flags);
486 push_desc_queue(c); 504 list_add_tail(&c->node, &cdd->pending);
487 else 505 if (!cdd->is_suspended)
488 pending_desc(c); 506 cppi41_run_queue(cdd);
507 spin_unlock_irqrestore(&cdd->lock, flags);
489 508
490 pm_runtime_mark_last_busy(cdd->ddev.dev); 509 pm_runtime_mark_last_busy(cdd->ddev.dev);
491 pm_runtime_put_autosuspend(cdd->ddev.dev); 510 pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
705 WARN_ON(!cdd->chan_busy[desc_num]); 724 WARN_ON(!cdd->chan_busy[desc_num]);
706 cdd->chan_busy[desc_num] = NULL; 725 cdd->chan_busy[desc_num] = NULL;
707 726
727 /* Usecount for chan_busy[], paired with push_desc_queue() */
728 pm_runtime_put(cdd->ddev.dev);
729
708 return 0; 730 return 0;
709} 731}
710 732
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
1150static int __maybe_unused cppi41_runtime_suspend(struct device *dev) 1172static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
1151{ 1173{
1152 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1174 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1175 unsigned long flags;
1153 1176
1177 spin_lock_irqsave(&cdd->lock, flags);
1178 cdd->is_suspended = true;
1154 WARN_ON(!list_empty(&cdd->pending)); 1179 WARN_ON(!list_empty(&cdd->pending));
1180 spin_unlock_irqrestore(&cdd->lock, flags);
1155 1181
1156 return 0; 1182 return 0;
1157} 1183}
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
1159static int __maybe_unused cppi41_runtime_resume(struct device *dev) 1185static int __maybe_unused cppi41_runtime_resume(struct device *dev)
1160{ 1186{
1161 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1187 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1162 struct cppi41_channel *c, *_c;
1163 unsigned long flags; 1188 unsigned long flags;
1164 1189
1165 spin_lock_irqsave(&cdd->lock, flags); 1190 spin_lock_irqsave(&cdd->lock, flags);
1166 list_for_each_entry_safe(c, _c, &cdd->pending, node) { 1191 cdd->is_suspended = false;
1167 push_desc_queue(c); 1192 cppi41_run_queue(cdd);
1168 list_del(&c->node);
1169 }
1170 spin_unlock_irqrestore(&cdd->lock, flags); 1193 spin_unlock_irqrestore(&cdd->lock, flags);
1171 1194
1172 return 0; 1195 return 0;
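
Taken together, the cppi41 hunks replace the racy pm_runtime_active() check with a driver-level is_suspended flag that is only read or written under cdd->lock, parking work on cdd->pending while suspended. A condensed sketch of the resulting flow (names as in the driver; error handling from the full functions trimmed):

    /* Condensed from the hunks above; not a drop-in replacement. */
    static void cppi41_run_queue(struct cppi41_dd *cdd)
    {
            struct cppi41_channel *c, *_c;

            /* caller holds cdd->lock, so the queue drains in order */
            list_for_each_entry_safe(c, _c, &cdd->pending, node) {
                    push_desc_queue(c);     /* takes the pm_runtime usecount */
                    list_del(&c->node);
            }
    }

    static void cppi41_dma_issue_pending(struct dma_chan *chan)
    {
            struct cppi41_channel *c = to_cpp41_chan(chan);
            struct cppi41_dd *cdd = c->cdd;
            unsigned long flags;

            pm_runtime_get(cdd->ddev.dev);
            spin_lock_irqsave(&cdd->lock, flags);
            list_add_tail(&c->node, &cdd->pending);
            if (!cdd->is_suspended)  /* else cppi41_runtime_resume() drains */
                    cppi41_run_queue(cdd);
            spin_unlock_irqrestore(&cdd->lock, flags);
            pm_runtime_mark_last_busy(cdd->ddev.dev);
            pm_runtime_put_autosuspend(cdd->ddev.dev);
    }
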
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 740bbb942594..7539f73df9e0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
1699static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) 1699static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1700{ 1700{
1701 struct pl330_thread *thrd = NULL; 1701 struct pl330_thread *thrd = NULL;
1702 unsigned long flags;
1703 int chans, i; 1702 int chans, i;
1704 1703
1705 if (pl330->state == DYING) 1704 if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1707 1706
1708 chans = pl330->pcfg.num_chan; 1707 chans = pl330->pcfg.num_chan;
1709 1708
1710 spin_lock_irqsave(&pl330->lock, flags);
1711
1712 for (i = 0; i < chans; i++) { 1709 for (i = 0; i < chans; i++) {
1713 thrd = &pl330->channels[i]; 1710 thrd = &pl330->channels[i];
1714 if ((thrd->free) && (!_manager_ns(thrd) || 1711 if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1726 thrd = NULL; 1723 thrd = NULL;
1727 } 1724 }
1728 1725
1729 spin_unlock_irqrestore(&pl330->lock, flags);
1730
1731 return thrd; 1726 return thrd;
1732} 1727}
1733 1728
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
1745static void pl330_release_channel(struct pl330_thread *thrd) 1740static void pl330_release_channel(struct pl330_thread *thrd)
1746{ 1741{
1747 struct pl330_dmac *pl330; 1742 struct pl330_dmac *pl330;
1748 unsigned long flags;
1749 1743
1750 if (!thrd || thrd->free) 1744 if (!thrd || thrd->free)
1751 return; 1745 return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
1757 1751
1758 pl330 = thrd->dmac; 1752 pl330 = thrd->dmac;
1759 1753
1760 spin_lock_irqsave(&pl330->lock, flags);
1761 _free_event(thrd, thrd->ev); 1754 _free_event(thrd, thrd->ev);
1762 thrd->free = true; 1755 thrd->free = true;
1763 spin_unlock_irqrestore(&pl330->lock, flags);
1764} 1756}
1765 1757
1766/* Initialize the structure for PL330 configuration, that can be used 1758/* Initialize the structure for PL330 configuration, that can be used
@@ -2122,20 +2114,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
2122 struct pl330_dmac *pl330 = pch->dmac; 2114 struct pl330_dmac *pl330 = pch->dmac;
2123 unsigned long flags; 2115 unsigned long flags;
2124 2116
2125 spin_lock_irqsave(&pch->lock, flags); 2117 spin_lock_irqsave(&pl330->lock, flags);
2126 2118
2127 dma_cookie_init(chan); 2119 dma_cookie_init(chan);
2128 pch->cyclic = false; 2120 pch->cyclic = false;
2129 2121
2130 pch->thread = pl330_request_channel(pl330); 2122 pch->thread = pl330_request_channel(pl330);
2131 if (!pch->thread) { 2123 if (!pch->thread) {
2132 spin_unlock_irqrestore(&pch->lock, flags); 2124 spin_unlock_irqrestore(&pl330->lock, flags);
2133 return -ENOMEM; 2125 return -ENOMEM;
2134 } 2126 }
2135 2127
2136 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); 2128 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
2137 2129
2138 spin_unlock_irqrestore(&pch->lock, flags); 2130 spin_unlock_irqrestore(&pl330->lock, flags);
2139 2131
2140 return 1; 2132 return 1;
2141} 2133}
@@ -2238,12 +2230,13 @@ static int pl330_pause(struct dma_chan *chan)
2238static void pl330_free_chan_resources(struct dma_chan *chan) 2230static void pl330_free_chan_resources(struct dma_chan *chan)
2239{ 2231{
2240 struct dma_pl330_chan *pch = to_pchan(chan); 2232 struct dma_pl330_chan *pch = to_pchan(chan);
2233 struct pl330_dmac *pl330 = pch->dmac;
2241 unsigned long flags; 2234 unsigned long flags;
2242 2235
2243 tasklet_kill(&pch->task); 2236 tasklet_kill(&pch->task);
2244 2237
2245 pm_runtime_get_sync(pch->dmac->ddma.dev); 2238 pm_runtime_get_sync(pch->dmac->ddma.dev);
2246 spin_lock_irqsave(&pch->lock, flags); 2239 spin_lock_irqsave(&pl330->lock, flags);
2247 2240
2248 pl330_release_channel(pch->thread); 2241 pl330_release_channel(pch->thread);
2249 pch->thread = NULL; 2242 pch->thread = NULL;
@@ -2251,7 +2244,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
2251 if (pch->cyclic) 2244 if (pch->cyclic)
2252 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); 2245 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
2253 2246
2254 spin_unlock_irqrestore(&pch->lock, flags); 2247 spin_unlock_irqrestore(&pl330->lock, flags);
2255 pm_runtime_mark_last_busy(pch->dmac->ddma.dev); 2248 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2256 pm_runtime_put_autosuspend(pch->dmac->ddma.dev); 2249 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2257} 2250}
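
The pl330 hunks all serve one locking change: pl330_request_channel() and pl330_release_channel() no longer take pl330->lock internally; instead every caller that touches the shared channel table must hold it, and the channel paths that previously used the per-channel pch->lock now take the controller-wide pl330->lock. Condensed sketch (names as in the driver):

    /* Lock moved from callee to caller; condensed from the hunks above. */
    static int pl330_alloc_chan_resources(struct dma_chan *chan)
    {
            struct dma_pl330_chan *pch = to_pchan(chan);
            struct pl330_dmac *pl330 = pch->dmac;
            unsigned long flags;

            spin_lock_irqsave(&pl330->lock, flags);   /* was pch->lock */

            dma_cookie_init(chan);
            pch->cyclic = false;

            /* runs with pl330->lock held; no locking of its own anymore */
            pch->thread = pl330_request_channel(pl330);
            if (!pch->thread) {
                    spin_unlock_irqrestore(&pl330->lock, flags);
                    return -ENOMEM;
            }

            tasklet_init(&pch->task, pl330_tasklet, (unsigned long)pch);

            spin_unlock_irqrestore(&pl330->lock, flags);
            return 1;
    }
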
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 921dfa047202..260c4b4b492e 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
187struct exit_boot_struct { 187struct exit_boot_struct {
188 efi_memory_desc_t *runtime_map; 188 efi_memory_desc_t *runtime_map;
189 int *runtime_entry_count; 189 int *runtime_entry_count;
190 void *new_fdt_addr;
190}; 191};
191 192
192static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, 193static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
202 efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, 203 efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
203 p->runtime_map, p->runtime_entry_count); 204 p->runtime_map, p->runtime_entry_count);
204 205
205 return EFI_SUCCESS; 206 return update_fdt_memmap(p->new_fdt_addr, map);
206} 207}
207 208
208/* 209/*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
300 301
301 priv.runtime_map = runtime_map; 302 priv.runtime_map = runtime_map;
302 priv.runtime_entry_count = &runtime_entry_count; 303 priv.runtime_entry_count = &runtime_entry_count;
304 priv.new_fdt_addr = (void *)*new_fdt_addr;
303 status = efi_exit_boot_services(sys_table, handle, &map, &priv, 305 status = efi_exit_boot_services(sys_table, handle, &map, &priv,
304 exit_boot_func); 306 exit_boot_func);
305 307
306 if (status == EFI_SUCCESS) { 308 if (status == EFI_SUCCESS) {
307 efi_set_virtual_address_map_t *svam; 309 efi_set_virtual_address_map_t *svam;
308 310
309 status = update_fdt_memmap((void *)*new_fdt_addr, &map);
310 if (status != EFI_SUCCESS) {
311 /*
312 * The kernel won't get far without the memory map, but
313 * may still be able to print something meaningful so
314 * return success here.
315 */
316 return EFI_SUCCESS;
317 }
318
319 /* Install the new virtual address map */ 311 /* Install the new virtual address map */
320 svam = sys_table->runtime->set_virtual_address_map; 312 svam = sys_table->runtime->set_virtual_address_map;
321 status = svam(runtime_entry_count * desc_size, desc_size, 313 status = svam(runtime_entry_count * desc_size, desc_size,
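
The libstub change moves the memory-map fixup into the exit_boot_func() callback. efi_exit_boot_services() invokes that callback with the freshly fetched memory map right before calling ExitBootServices() (and again if it has to retry with a refreshed map), so the FDT can no longer end up carrying a stale map; the FDT address the callback needs is passed through the private struct. Condensed:

    /* Condensed callback plumbing from the hunks above. */
    struct exit_boot_struct {
            efi_memory_desc_t *runtime_map;
            int *runtime_entry_count;
            void *new_fdt_addr;     /* new: lets the callback patch the FDT */
    };

    static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
                                       struct efi_boot_memmap *map, void *priv)
    {
            struct exit_boot_struct *p = priv;

            efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
                            p->runtime_map, p->runtime_entry_count);

            /* write the final map into the FDT before boot services end */
            return update_fdt_memmap(p->new_fdt_addr, map);
    }
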
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 86bf3b84ada5..a07ae9e37930 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
1723} 1723}
1724 1724
1725/** 1725/**
1726 * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip 1726 * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
1727 * @gpiochip: the gpiochip to add the irqchip to 1727 * @gpiochip: the gpiochip to add the irqchip to
1728 * @irqchip: the irqchip to add to the gpiochip 1728 * @irqchip: the irqchip to add to the gpiochip
1729 * @first_irq: if not dynamically assigned, the base (first) IRQ to 1729 * @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
1749 * the pins on the gpiochip can generate a unique IRQ. Everything else 1749 * the pins on the gpiochip can generate a unique IRQ. Everything else
1750 * need to be open coded. 1750 * need to be open coded.
1751 */ 1751 */
1752int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, 1752int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
1753 struct irq_chip *irqchip, 1753 struct irq_chip *irqchip,
1754 unsigned int first_irq, 1754 unsigned int first_irq,
1755 irq_flow_handler_t handler, 1755 irq_flow_handler_t handler,
1756 unsigned int type, 1756 unsigned int type,
1757 bool nested, 1757 bool nested,
1758 struct lock_class_key *lock_key) 1758 struct lock_class_key *lock_key)
1759{ 1759{
1760 struct device_node *of_node; 1760 struct device_node *of_node;
1761 bool irq_base_set = false; 1761 bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
1840 1840
1841 return 0; 1841 return 0;
1842} 1842}
1843EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add); 1843EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
1844 1844
1845#else /* CONFIG_GPIOLIB_IRQCHIP */ 1845#else /* CONFIG_GPIOLIB_IRQCHIP */
1846 1846
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 29d6d84d1c28..41e41f90265d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
83 } 83 }
84 break; 84 break;
85 } 85 }
86
87 if (!(*out_ring && (*out_ring)->adev)) {
88 DRM_ERROR("Ring %d is not initialized on IP %d\n",
89 ring, ip_type);
90 return -EINVAL;
91 }
92
86 return 0; 93 return 0;
87} 94}
88 95
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9999dc71b998..ccb5e02e7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
2512 2512
2513 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2513 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2514 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2514 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2515 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2516 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2515 2517
2516 return 0; 2518 return 0;
2517} 2519}
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2537 int32_t hot_y) 2539 int32_t hot_y)
2538{ 2540{
2539 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2541 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2540 struct amdgpu_device *adev = crtc->dev->dev_private;
2541 struct drm_gem_object *obj; 2542 struct drm_gem_object *obj;
2542 struct amdgpu_bo *aobj; 2543 struct amdgpu_bo *aobj;
2543 int ret; 2544 int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2578 2579
2579 dce_v10_0_lock_cursor(crtc, true); 2580 dce_v10_0_lock_cursor(crtc, true);
2580 2581
2581 if (hot_x != amdgpu_crtc->cursor_hot_x || 2582 if (width != amdgpu_crtc->cursor_width ||
2583 height != amdgpu_crtc->cursor_height ||
2584 hot_x != amdgpu_crtc->cursor_hot_x ||
2582 hot_y != amdgpu_crtc->cursor_hot_y) { 2585 hot_y != amdgpu_crtc->cursor_hot_y) {
2583 int x, y; 2586 int x, y;
2584 2587
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2587 2590
2588 dce_v10_0_cursor_move_locked(crtc, x, y); 2591 dce_v10_0_cursor_move_locked(crtc, x, y);
2589 2592
2590 amdgpu_crtc->cursor_hot_x = hot_x;
2591 amdgpu_crtc->cursor_hot_y = hot_y;
2592 }
2593
2594 if (width != amdgpu_crtc->cursor_width ||
2595 height != amdgpu_crtc->cursor_height) {
2596 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2597 (width - 1) << 16 | (height - 1));
2598 amdgpu_crtc->cursor_width = width; 2593 amdgpu_crtc->cursor_width = width;
2599 amdgpu_crtc->cursor_height = height; 2594 amdgpu_crtc->cursor_height = height;
2595 amdgpu_crtc->cursor_hot_x = hot_x;
2596 amdgpu_crtc->cursor_hot_y = hot_y;
2600 } 2597 }
2601 2598
2602 dce_v10_0_show_cursor(crtc); 2599 dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
2620static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) 2617static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2621{ 2618{
2622 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2619 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2623 struct amdgpu_device *adev = crtc->dev->dev_private;
2624 2620
2625 if (amdgpu_crtc->cursor_bo) { 2621 if (amdgpu_crtc->cursor_bo) {
2626 dce_v10_0_lock_cursor(crtc, true); 2622 dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2628 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2624 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2629 amdgpu_crtc->cursor_y); 2625 amdgpu_crtc->cursor_y);
2630 2626
2631 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2632 (amdgpu_crtc->cursor_width - 1) << 16 |
2633 (amdgpu_crtc->cursor_height - 1));
2634
2635 dce_v10_0_show_cursor(crtc); 2627 dce_v10_0_show_cursor(crtc);
2636 2628
2637 dce_v10_0_lock_cursor(crtc, false); 2629 dce_v10_0_lock_cursor(crtc, false);
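
The same reshuffle is applied to all four DCE generations that follow: CUR_SIZE is now programmed inside cursor_move_locked(), which runs on every cursor update, so the size-only register writes in cursor_set2() and cursor_reset() become redundant, the unused adev locals go away, and the width/height/hot-spot bookkeeping collapses into a single branch. Condensed from the dce_v10_0 hunks (v11/v6/v8 follow the same shape):

    static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
    {
            struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
            struct amdgpu_device *adev = crtc->dev->dev_private;
            int xorigin = 0, yorigin = 0;

            /* ... clamp x/y and compute the origins as before ... */

            WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
            WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset,
                   (xorigin << 16) | yorigin);
            /* size is (re)programmed on every move now, not only on set2 */
            WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
                   ((amdgpu_crtc->cursor_width - 1) << 16) |
                   (amdgpu_crtc->cursor_height - 1));

            return 0;
    }
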
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 2006abbbfb62..a7af5b33a5e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
2532 2532
2533 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2533 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2534 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2534 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2535 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2536 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2535 2537
2536 return 0; 2538 return 0;
2537} 2539}
@@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
2557 int32_t hot_y) 2559 int32_t hot_y)
2558{ 2560{
2559 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2561 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2560 struct amdgpu_device *adev = crtc->dev->dev_private;
2561 struct drm_gem_object *obj; 2562 struct drm_gem_object *obj;
2562 struct amdgpu_bo *aobj; 2563 struct amdgpu_bo *aobj;
2563 int ret; 2564 int ret;
@@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
2598 2599
2599 dce_v11_0_lock_cursor(crtc, true); 2600 dce_v11_0_lock_cursor(crtc, true);
2600 2601
2601 if (hot_x != amdgpu_crtc->cursor_hot_x || 2602 if (width != amdgpu_crtc->cursor_width ||
2603 height != amdgpu_crtc->cursor_height ||
2604 hot_x != amdgpu_crtc->cursor_hot_x ||
2602 hot_y != amdgpu_crtc->cursor_hot_y) { 2605 hot_y != amdgpu_crtc->cursor_hot_y) {
2603 int x, y; 2606 int x, y;
2604 2607
@@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
2607 2610
2608 dce_v11_0_cursor_move_locked(crtc, x, y); 2611 dce_v11_0_cursor_move_locked(crtc, x, y);
2609 2612
2610 amdgpu_crtc->cursor_hot_x = hot_x;
2611 amdgpu_crtc->cursor_hot_y = hot_y;
2612 }
2613
2614 if (width != amdgpu_crtc->cursor_width ||
2615 height != amdgpu_crtc->cursor_height) {
2616 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2617 (width - 1) << 16 | (height - 1));
2618 amdgpu_crtc->cursor_width = width; 2613 amdgpu_crtc->cursor_width = width;
2619 amdgpu_crtc->cursor_height = height; 2614 amdgpu_crtc->cursor_height = height;
2615 amdgpu_crtc->cursor_hot_x = hot_x;
2616 amdgpu_crtc->cursor_hot_y = hot_y;
2620 } 2617 }
2621 2618
2622 dce_v11_0_show_cursor(crtc); 2619 dce_v11_0_show_cursor(crtc);
@@ -2640,7 +2637,6 @@ unpin:
2640static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) 2637static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2641{ 2638{
2642 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2639 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2643 struct amdgpu_device *adev = crtc->dev->dev_private;
2644 2640
2645 if (amdgpu_crtc->cursor_bo) { 2641 if (amdgpu_crtc->cursor_bo) {
2646 dce_v11_0_lock_cursor(crtc, true); 2642 dce_v11_0_lock_cursor(crtc, true);
@@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2648 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2644 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2649 amdgpu_crtc->cursor_y); 2645 amdgpu_crtc->cursor_y);
2650 2646
2651 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2652 (amdgpu_crtc->cursor_width - 1) << 16 |
2653 (amdgpu_crtc->cursor_height - 1));
2654
2655 dce_v11_0_show_cursor(crtc); 2647 dce_v11_0_show_cursor(crtc);
2656 2648
2657 dce_v11_0_lock_cursor(crtc, false); 2649 dce_v11_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b4e4ec630e8c..39df6a50637f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1859 struct amdgpu_device *adev = crtc->dev->dev_private; 1859 struct amdgpu_device *adev = crtc->dev->dev_private;
1860 int xorigin = 0, yorigin = 0; 1860 int xorigin = 0, yorigin = 0;
1861 1861
1862 int w = amdgpu_crtc->cursor_width;
1863
1862 amdgpu_crtc->cursor_x = x; 1864 amdgpu_crtc->cursor_x = x;
1863 amdgpu_crtc->cursor_y = y; 1865 amdgpu_crtc->cursor_y = y;
1864 1866
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1878 1880
1879 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 1881 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
1880 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 1882 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
1883 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1884 ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
1881 1885
1882 return 0; 1886 return 0;
1883} 1887}
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1903 int32_t hot_y) 1907 int32_t hot_y)
1904{ 1908{
1905 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1909 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1906 struct amdgpu_device *adev = crtc->dev->dev_private;
1907 struct drm_gem_object *obj; 1910 struct drm_gem_object *obj;
1908 struct amdgpu_bo *aobj; 1911 struct amdgpu_bo *aobj;
1909 int ret; 1912 int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1944 1947
1945 dce_v6_0_lock_cursor(crtc, true); 1948 dce_v6_0_lock_cursor(crtc, true);
1946 1949
1947 if (hot_x != amdgpu_crtc->cursor_hot_x || 1950 if (width != amdgpu_crtc->cursor_width ||
1951 height != amdgpu_crtc->cursor_height ||
1952 hot_x != amdgpu_crtc->cursor_hot_x ||
1948 hot_y != amdgpu_crtc->cursor_hot_y) { 1953 hot_y != amdgpu_crtc->cursor_hot_y) {
1949 int x, y; 1954 int x, y;
1950 1955
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1953 1958
1954 dce_v6_0_cursor_move_locked(crtc, x, y); 1959 dce_v6_0_cursor_move_locked(crtc, x, y);
1955 1960
1956 amdgpu_crtc->cursor_hot_x = hot_x;
1957 amdgpu_crtc->cursor_hot_y = hot_y;
1958 }
1959
1960 if (width != amdgpu_crtc->cursor_width ||
1961 height != amdgpu_crtc->cursor_height) {
1962 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1963 (width - 1) << 16 | (height - 1));
1964 amdgpu_crtc->cursor_width = width; 1961 amdgpu_crtc->cursor_width = width;
1965 amdgpu_crtc->cursor_height = height; 1962 amdgpu_crtc->cursor_height = height;
1963 amdgpu_crtc->cursor_hot_x = hot_x;
1964 amdgpu_crtc->cursor_hot_y = hot_y;
1966 } 1965 }
1967 1966
1968 dce_v6_0_show_cursor(crtc); 1967 dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
1986static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) 1985static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
1987{ 1986{
1988 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1987 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1989 struct amdgpu_device *adev = crtc->dev->dev_private;
1990 1988
1991 if (amdgpu_crtc->cursor_bo) { 1989 if (amdgpu_crtc->cursor_bo) {
1992 dce_v6_0_lock_cursor(crtc, true); 1990 dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
1994 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 1992 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
1995 amdgpu_crtc->cursor_y); 1993 amdgpu_crtc->cursor_y);
1996 1994
1997 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1998 (amdgpu_crtc->cursor_width - 1) << 16 |
1999 (amdgpu_crtc->cursor_height - 1));
2000
2001 dce_v6_0_show_cursor(crtc); 1995 dce_v6_0_show_cursor(crtc);
2002 dce_v6_0_lock_cursor(crtc, false); 1996 dce_v6_0_lock_cursor(crtc, false);
2003 } 1997 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 584abe834a3c..28102bb1704d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2363 2363
2364 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2364 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2365 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2365 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2366 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2367 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2366 2368
2367 return 0; 2369 return 0;
2368} 2370}
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2388 int32_t hot_y) 2390 int32_t hot_y)
2389{ 2391{
2390 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2392 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2391 struct amdgpu_device *adev = crtc->dev->dev_private;
2392 struct drm_gem_object *obj; 2393 struct drm_gem_object *obj;
2393 struct amdgpu_bo *aobj; 2394 struct amdgpu_bo *aobj;
2394 int ret; 2395 int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2429 2430
2430 dce_v8_0_lock_cursor(crtc, true); 2431 dce_v8_0_lock_cursor(crtc, true);
2431 2432
2432 if (hot_x != amdgpu_crtc->cursor_hot_x || 2433 if (width != amdgpu_crtc->cursor_width ||
2434 height != amdgpu_crtc->cursor_height ||
2435 hot_x != amdgpu_crtc->cursor_hot_x ||
2433 hot_y != amdgpu_crtc->cursor_hot_y) { 2436 hot_y != amdgpu_crtc->cursor_hot_y) {
2434 int x, y; 2437 int x, y;
2435 2438
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2438 2441
2439 dce_v8_0_cursor_move_locked(crtc, x, y); 2442 dce_v8_0_cursor_move_locked(crtc, x, y);
2440 2443
2441 amdgpu_crtc->cursor_hot_x = hot_x;
2442 amdgpu_crtc->cursor_hot_y = hot_y;
2443 }
2444
2445 if (width != amdgpu_crtc->cursor_width ||
2446 height != amdgpu_crtc->cursor_height) {
2447 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2448 (width - 1) << 16 | (height - 1));
2449 amdgpu_crtc->cursor_width = width; 2444 amdgpu_crtc->cursor_width = width;
2450 amdgpu_crtc->cursor_height = height; 2445 amdgpu_crtc->cursor_height = height;
2446 amdgpu_crtc->cursor_hot_x = hot_x;
2447 amdgpu_crtc->cursor_hot_y = hot_y;
2451 } 2448 }
2452 2449
2453 dce_v8_0_show_cursor(crtc); 2450 dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
2471static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) 2468static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2472{ 2469{
2473 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2470 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2474 struct amdgpu_device *adev = crtc->dev->dev_private;
2475 2471
2476 if (amdgpu_crtc->cursor_bo) { 2472 if (amdgpu_crtc->cursor_bo) {
2477 dce_v8_0_lock_cursor(crtc, true); 2473 dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2479 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2475 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2480 amdgpu_crtc->cursor_y); 2476 amdgpu_crtc->cursor_y);
2481 2477
2482 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2483 (amdgpu_crtc->cursor_width - 1) << 16 |
2484 (amdgpu_crtc->cursor_height - 1));
2485
2486 dce_v8_0_show_cursor(crtc); 2478 dce_v8_0_show_cursor(crtc);
2487 2479
2488 dce_v8_0_lock_cursor(crtc, false); 2480 dce_v8_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 762f8e82ceb7..e9a176891e13 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
627 627
628static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) 628static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
629{ 629{
630 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
631
632 kfree(amdgpu_encoder->enc_priv);
633 drm_encoder_cleanup(encoder); 630 drm_encoder_cleanup(encoder);
634 kfree(amdgpu_encoder); 631 kfree(encoder);
635} 632}
636 633
637static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { 634static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 45a573e63d4a..0635829b18cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
44MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); 44MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
45MODULE_FIRMWARE("radeon/verde_mc.bin"); 45MODULE_FIRMWARE("radeon/verde_mc.bin");
46MODULE_FIRMWARE("radeon/oland_mc.bin"); 46MODULE_FIRMWARE("radeon/oland_mc.bin");
47MODULE_FIRMWARE("radeon/si58_mc.bin");
47 48
48#define MC_SEQ_MISC0__MT__MASK 0xf0000000 49#define MC_SEQ_MISC0__MT__MASK 0xf0000000
49#define MC_SEQ_MISC0__MT__GDDR1 0x10000000 50#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
113 const char *chip_name; 114 const char *chip_name;
114 char fw_name[30]; 115 char fw_name[30];
115 int err; 116 int err;
117 bool is_58_fw = false;
116 118
117 DRM_DEBUG("\n"); 119 DRM_DEBUG("\n");
118 120
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
135 default: BUG(); 137 default: BUG();
136 } 138 }
137 139
138 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 140 /* this memory configuration requires special firmware */
141 if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
142 is_58_fw = true;
143
144 if (is_58_fw)
145 snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
146 else
147 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
139 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 148 err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
140 if (err) 149 if (err)
141 goto out; 150 goto out;
@@ -245,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
245 } 254 }
246 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); 255 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
247 256
257 if (adev->mode_info.num_crtc)
258 amdgpu_display_set_vga_render_state(adev, false);
259
248 gmc_v6_0_mc_stop(adev, &save); 260 gmc_v6_0_mc_stop(adev, &save);
249 261
250 if (gmc_v6_0_wait_for_idle((void *)adev)) { 262 if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -274,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
274 dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 286 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
275 } 287 }
276 gmc_v6_0_mc_resume(adev, &save); 288 gmc_v6_0_mc_resume(adev, &save);
277 amdgpu_display_set_vga_render_state(adev, false);
278} 289}
279 290
280static int gmc_v6_0_mc_init(struct amdgpu_device *adev) 291static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -463,19 +474,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
463 WREG32(mmVM_CONTEXT1_CNTL, 474 WREG32(mmVM_CONTEXT1_CNTL,
464 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | 475 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
465 (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | 476 (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
466 ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) | 477 ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
467 VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 478 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
468 VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 479 gmc_v6_0_set_fault_enable_default(adev, false);
469 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 480 else
470 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 481 gmc_v6_0_set_fault_enable_default(adev, true);
471 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
472 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
473 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
474 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
475 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
476 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
477 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
478 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
479 482
480 gmc_v6_0_gart_flush_gpu_tlb(adev, 0); 483 gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
481 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", 484 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +757,10 @@ static int gmc_v6_0_late_init(void *handle)
754{ 757{
755 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 758 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
756 759
757 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 760 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
761 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
762 else
763 return 0;
758} 764}
759 765
760static int gmc_v6_0_sw_init(void *handle) 766static int gmc_v6_0_sw_init(void *handle)
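
The MC-firmware hunk keys the image choice off the memory type encoded in the top byte of MC_SEQ_MISC0: a reading of 0x58 means the board needs the dedicated si58_mc.bin regardless of chip name. Condensed into a standalone helper (the helper name is hypothetical, for illustration only):

    /* Hypothetical helper condensing gmc_v6_0_init_microcode()'s selection. */
    static void gmc_v6_0_pick_mc_fw(struct amdgpu_device *adev,
                                    const char *chip_name,
                                    char *fw_name, size_t len)
    {
            /* this memory configuration requires special firmware */
            bool is_58_fw =
                    ((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58;

            if (is_58_fw)
                    snprintf(fw_name, len, "radeon/si58_mc.bin");
            else
                    snprintf(fw_name, len, "radeon/%s_mc.bin", chip_name);
    }
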
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 10bedfac27b8..6e150db8f380 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
64MODULE_FIRMWARE("radeon/oland_k_smc.bin"); 64MODULE_FIRMWARE("radeon/oland_k_smc.bin");
65MODULE_FIRMWARE("radeon/hainan_smc.bin"); 65MODULE_FIRMWARE("radeon/hainan_smc.bin");
66MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 66MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
67MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
67 68
68union power_info { 69union power_info {
69 struct _ATOM_POWERPLAY_INFO info; 70 struct _ATOM_POWERPLAY_INFO info;
@@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3487 (adev->pdev->device == 0x6817) || 3488 (adev->pdev->device == 0x6817) ||
3488 (adev->pdev->device == 0x6806)) 3489 (adev->pdev->device == 0x6806))
3489 max_mclk = 120000; 3490 max_mclk = 120000;
3490 } else if (adev->asic_type == CHIP_OLAND) {
3491 if ((adev->pdev->revision == 0xC7) ||
3492 (adev->pdev->revision == 0x80) ||
3493 (adev->pdev->revision == 0x81) ||
3494 (adev->pdev->revision == 0x83) ||
3495 (adev->pdev->revision == 0x87) ||
3496 (adev->pdev->device == 0x6604) ||
3497 (adev->pdev->device == 0x6605)) {
3498 max_sclk = 75000;
3499 max_mclk = 80000;
3500 }
3501 } else if (adev->asic_type == CHIP_HAINAN) { 3491 } else if (adev->asic_type == CHIP_HAINAN) {
3502 if ((adev->pdev->revision == 0x81) || 3492 if ((adev->pdev->revision == 0x81) ||
3503 (adev->pdev->revision == 0x83) || 3493 (adev->pdev->revision == 0x83) ||
@@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3506 (adev->pdev->device == 0x6665) || 3496 (adev->pdev->device == 0x6665) ||
3507 (adev->pdev->device == 0x6667)) { 3497 (adev->pdev->device == 0x6667)) {
3508 max_sclk = 75000; 3498 max_sclk = 75000;
3509 max_mclk = 80000;
3510 } 3499 }
3511 } 3500 }
3512 /* Apply dpm quirks */ 3501 /* Apply dpm quirks */
@@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
7713 ((adev->pdev->device == 0x6660) || 7702 ((adev->pdev->device == 0x6660) ||
7714 (adev->pdev->device == 0x6663) || 7703 (adev->pdev->device == 0x6663) ||
7715 (adev->pdev->device == 0x6665) || 7704 (adev->pdev->device == 0x6665) ||
7716 (adev->pdev->device == 0x6667))) || 7705 (adev->pdev->device == 0x6667))))
7717 ((adev->pdev->revision == 0xc3) &&
7718 (adev->pdev->device == 0x6665)))
7719 chip_name = "hainan_k"; 7706 chip_name = "hainan_k";
7707 else if ((adev->pdev->revision == 0xc3) &&
7708 (adev->pdev->device == 0x6665))
7709 chip_name = "banks_k_2";
7720 else 7710 else
7721 chip_name = "hainan"; 7711 chip_name = "hainan";
7722 break; 7712 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 96444e4d862a..7fb9137dd89b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -40,13 +40,14 @@
40#include "smu/smu_7_0_1_sh_mask.h" 40#include "smu/smu_7_0_1_sh_mask.h"
41 41
42static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); 42static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
43static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
44static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); 43static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
45static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); 44static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
46static int uvd_v4_2_start(struct amdgpu_device *adev); 45static int uvd_v4_2_start(struct amdgpu_device *adev);
47static void uvd_v4_2_stop(struct amdgpu_device *adev); 46static void uvd_v4_2_stop(struct amdgpu_device *adev);
48static int uvd_v4_2_set_clockgating_state(void *handle, 47static int uvd_v4_2_set_clockgating_state(void *handle,
49 enum amd_clockgating_state state); 48 enum amd_clockgating_state state);
49static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
50 bool sw_mode);
50/** 51/**
51 * uvd_v4_2_ring_get_rptr - get read pointer 52 * uvd_v4_2_ring_get_rptr - get read pointer
52 * 53 *
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
140 141
141 return r; 142 return r;
142} 143}
143 144static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
145 bool enable);
144/** 146/**
145 * uvd_v4_2_hw_init - start and test UVD block 147 * uvd_v4_2_hw_init - start and test UVD block
146 * 148 *
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
155 uint32_t tmp; 157 uint32_t tmp;
156 int r; 158 int r;
157 159
158 uvd_v4_2_init_cg(adev); 160 uvd_v4_2_enable_mgcg(adev, true);
159 uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
160 amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); 161 amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
161 r = uvd_v4_2_start(adev); 162 r = uvd_v4_2_start(adev);
162 if (r) 163 if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
266 struct amdgpu_ring *ring = &adev->uvd.ring; 267 struct amdgpu_ring *ring = &adev->uvd.ring;
267 uint32_t rb_bufsz; 268 uint32_t rb_bufsz;
268 int i, j, r; 269 int i, j, r;
269
270 /* disable byte swapping */ 270 /* disable byte swapping */
271 u32 lmi_swap_cntl = 0; 271 u32 lmi_swap_cntl = 0;
272 u32 mp_swap_cntl = 0; 272 u32 mp_swap_cntl = 0;
273 273
274 WREG32(mmUVD_CGC_GATE, 0);
275 uvd_v4_2_set_dcm(adev, true);
276
274 uvd_v4_2_mc_resume(adev); 277 uvd_v4_2_mc_resume(adev);
275 278
276 /* disable interupt */ 279 /* disable interupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
406 409
407 /* Unstall UMC and register bus */ 410 /* Unstall UMC and register bus */
408 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); 411 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
412
413 uvd_v4_2_set_dcm(adev, false);
409} 414}
410 415
411/** 416/**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
619 WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); 624 WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
620} 625}
621 626
622static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
623{
624 bool hw_mode = true;
625
626 if (hw_mode) {
627 uvd_v4_2_set_dcm(adev, false);
628 } else {
629 u32 tmp = RREG32(mmUVD_CGC_CTRL);
630 tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
631 WREG32(mmUVD_CGC_CTRL, tmp);
632 }
633}
634
635static bool uvd_v4_2_is_idle(void *handle) 627static bool uvd_v4_2_is_idle(void *handle)
636{ 628{
637 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 629 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
685static int uvd_v4_2_set_clockgating_state(void *handle, 677static int uvd_v4_2_set_clockgating_state(void *handle,
686 enum amd_clockgating_state state) 678 enum amd_clockgating_state state)
687{ 679{
688 bool gate = false;
689 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
690
691 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
692 return 0;
693
694 if (state == AMD_CG_STATE_GATE)
695 gate = true;
696
697 uvd_v4_2_enable_mgcg(adev, gate);
698
699 return 0; 680 return 0;
700} 681}
701 682
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
711 */ 692 */
712 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 693 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
713 694
714 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
715 return 0;
716
717 if (state == AMD_PG_STATE_GATE) { 695 if (state == AMD_PG_STATE_GATE) {
718 uvd_v4_2_stop(adev); 696 uvd_v4_2_stop(adev);
719 return 0; 697 return 0;
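
Net effect of the UVD hunks: dynamic clock gating is no longer toggled through the set_clockgating_state callback (now a stub) or the removed uvd_v4_2_init_cg(); instead it is bracketed around the block's start/stop, so the clocks stay ungated for as long as the decoder runs and are gated again when it stops. Condensed:

    /* Condensed start/stop bracketing from the hunks above. */
    static int uvd_v4_2_start(struct amdgpu_device *adev)
    {
            WREG32(mmUVD_CGC_GATE, 0);      /* ungate everything */
            uvd_v4_2_set_dcm(adev, true);   /* dynamic clock mode on */

            uvd_v4_2_mc_resume(adev);
            /* ... ring and firmware bring-up as before ... */
            return 0;
    }

    static void uvd_v4_2_stop(struct amdgpu_device *adev)
    {
            /* ... ring teardown as before ... */
            uvd_v4_2_set_dcm(adev, false);  /* gate the clocks when idle */
    }
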
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 5fb0b7f5c065..37ca685e5a9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,9 +43,13 @@
43 43
44#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 44#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
45#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 45#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
46#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
47
46#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 48#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
47#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 49#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
48#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 50#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
51#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
52
49#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 53#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
50 54
51#define VCE_V3_0_FW_SIZE (384 * 1024) 55#define VCE_V3_0_FW_SIZE (384 * 1024)
@@ -54,6 +58,9 @@
54 58
55#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) 59#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
56 60
61#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
62 | GRBM_GFX_INDEX__VCE_ALL_PIPE)
63
57static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); 64static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
58static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); 65static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
59static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); 66static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
175 WREG32(mmVCE_UENC_CLOCK_GATING_2, data); 182 WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
176 183
177 data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); 184 data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
178 data &= ~0xffc00000; 185 data &= ~0x3ff;
179 WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); 186 WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
180 187
181 data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); 188 data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
249 if (adev->vce.harvest_config & (1 << idx)) 256 if (adev->vce.harvest_config & (1 << idx))
250 continue; 257 continue;
251 258
252 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 259 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
253 vce_v3_0_mc_resume(adev, idx); 260 vce_v3_0_mc_resume(adev, idx);
254 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); 261 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
255 262
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
273 } 280 }
274 } 281 }
275 282
276 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 283 WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
277 mutex_unlock(&adev->grbm_idx_mutex); 284 mutex_unlock(&adev->grbm_idx_mutex);
278 285
279 return 0; 286 return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
288 if (adev->vce.harvest_config & (1 << idx)) 295 if (adev->vce.harvest_config & (1 << idx))
289 continue; 296 continue;
290 297
291 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 298 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
292 299
293 if (adev->asic_type >= CHIP_STONEY) 300 if (adev->asic_type >= CHIP_STONEY)
294 WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); 301 WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
306 vce_v3_0_set_vce_sw_clock_gating(adev, false); 313 vce_v3_0_set_vce_sw_clock_gating(adev, false);
307 } 314 }
308 315
309 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 316 WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
310 mutex_unlock(&adev->grbm_idx_mutex); 317 mutex_unlock(&adev->grbm_idx_mutex);
311 318
312 return 0; 319 return 0;
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
586 * VCE team suggest use bit 3--bit 6 for busy status check 593 * VCE team suggest use bit 3--bit 6 for busy status check
587 */ 594 */
588 mutex_lock(&adev->grbm_idx_mutex); 595 mutex_lock(&adev->grbm_idx_mutex);
589 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); 596 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
590 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 597 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
591 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 598 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
592 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 599 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
593 } 600 }
594 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); 601 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
595 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 602 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
596 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 603 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
597 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 604 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
598 } 605 }
599 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); 606 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
600 mutex_unlock(&adev->grbm_idx_mutex); 607 mutex_unlock(&adev->grbm_idx_mutex);
601 608
602 if (srbm_soft_reset) { 609 if (srbm_soft_reset) {
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
734 if (adev->vce.harvest_config & (1 << i)) 741 if (adev->vce.harvest_config & (1 << i))
735 continue; 742 continue;
736 743
737 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); 744 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
738 745
739 if (enable) { 746 if (enable) {
740 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ 747 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
753 vce_v3_0_set_vce_sw_clock_gating(adev, enable); 760 vce_v3_0_set_vce_sw_clock_gating(adev, enable);
754 } 761 }
755 762
756 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 763 WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
757 mutex_unlock(&adev->grbm_idx_mutex); 764 mutex_unlock(&adev->grbm_idx_mutex);
758 765
759 return 0; 766 return 0;
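
Every GRBM_GFX_INDEX write in the VCE hunks now goes through GET_VCE_INSTANCE(), which ORs the VCE_ALL_PIPE bits into the instance selector, and the teardown paths restore the documented power-on default instead of re-selecting instance 0. Condensed:

    /* Condensed from the hunks above. */
    #define GRBM_GFX_INDEX__VCE_ALL_PIPE    0x07
    #define mmGRBM_GFX_INDEX_DEFAULT        0xE0000000

    #define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
                                  | GRBM_GFX_INDEX__VCE_ALL_PIPE)

            /* select instance idx, addressing all of its pipes ... */
            WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
            /* ... program the instance ... */

            /* ... and restore the default routing when done */
            WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
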
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b0c63c5f54c9..6bb79c94cb9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
200 cgs_set_clockgating_state( 200 cgs_set_clockgating_state(
201 hwmgr->device, 201 hwmgr->device,
202 AMD_IP_BLOCK_TYPE_VCE, 202 AMD_IP_BLOCK_TYPE_VCE,
203 AMD_CG_STATE_UNGATE); 203 AMD_CG_STATE_GATE);
204 cgs_set_powergating_state( 204 cgs_set_powergating_state(
205 hwmgr->device, 205 hwmgr->device,
206 AMD_IP_BLOCK_TYPE_VCE, 206 AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
218 cgs_set_clockgating_state( 218 cgs_set_clockgating_state(
219 hwmgr->device, 219 hwmgr->device,
220 AMD_IP_BLOCK_TYPE_VCE, 220 AMD_IP_BLOCK_TYPE_VCE,
221 AMD_PG_STATE_GATE); 221 AMD_PG_STATE_UNGATE);
222 cz_dpm_update_vce_dpm(hwmgr); 222 cz_dpm_update_vce_dpm(hwmgr);
223 cz_enable_disable_vce_dpm(hwmgr, true); 223 cz_enable_disable_vce_dpm(hwmgr, true);
224 return 0; 224 return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 4b14f259a147..0fb4e8c8f5e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1402 cz_hwmgr->vce_dpm.hard_min_clk, 1402 cz_hwmgr->vce_dpm.hard_min_clk,
1403 PPSMC_MSG_SetEclkHardMin)); 1403 PPSMC_MSG_SetEclkHardMin));
1404 } else { 1404 } else {
1405 /*EPR# 419220 -HW limitation to to */ 1405 /*Program HardMin based on the vce_arbiter.ecclk */
1406 cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; 1406 if (hwmgr->vce_arbiter.ecclk == 0) {
1407 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1407 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1408 PPSMC_MSG_SetEclkHardMin, 1408 PPSMC_MSG_SetEclkHardMin, 0);
1409 cz_get_eclk_level(hwmgr, 1409 /* disable ECLK DPM 0. Otherwise VCE could hang if
1410 cz_hwmgr->vce_dpm.hard_min_clk, 1410 * switching SCLK from DPM 0 to 6/7 */
1411 PPSMC_MSG_SetEclkHardMin)); 1411 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1412 1412 PPSMC_MSG_SetEclkSoftMin, 1);
1413 } else {
1414 cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
1415 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1416 PPSMC_MSG_SetEclkHardMin,
1417 cz_get_eclk_level(hwmgr,
1418 cz_hwmgr->vce_dpm.hard_min_clk,
1419 PPSMC_MSG_SetEclkHardMin));
1420 }
1413 } 1421 }
1414 return 0; 1422 return 0;
1415} 1423}
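
The cz_hwmgr hunk splits the non-power-gated path on whether the VCE arbiter actually requested an ECLK. With ecclk == 0, the hard minimum is dropped to 0 and, per the new comment, ECLK DPM level 0 is additionally kept off through the soft minimum so VCE cannot hang when SCLK jumps from DPM 0 to 6/7. Condensed:

    /* Condensed from the cz_dpm_update_vce_dpm() hunk above. */
    if (hwmgr->vce_arbiter.ecclk == 0) {
            smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
                            PPSMC_MSG_SetEclkHardMin, 0);
            /* keep ECLK off DPM 0 so VCE survives SCLK 0 -> 6/7 switches */
            smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
                            PPSMC_MSG_SetEclkSoftMin, 1);
    } else {
            cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
            smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
                            PPSMC_MSG_SetEclkHardMin,
                            cz_get_eclk_level(hwmgr,
                                    cz_hwmgr->vce_dpm.hard_min_clk,
                                    PPSMC_MSG_SetEclkHardMin));
    }
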
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 908011d2c8f5..7abda94fc2cf 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,7 @@ struct ast_private {
113 struct ttm_bo_kmap_obj cache_kmap; 113 struct ttm_bo_kmap_obj cache_kmap;
114 int next_cursor; 114 int next_cursor;
115 bool support_wide_screen; 115 bool support_wide_screen;
116 bool DisableP2A;
116 117
117 enum ast_tx_chip tx_chip_type; 118 enum ast_tx_chip tx_chip_type;
118 u8 dp501_maxclk; 119 u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f75c6421db62..533e762d036d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
124 } else 124 } else
125 *need_post = false; 125 *need_post = false;
126 126
127 /* Check P2A Access */
128 ast->DisableP2A = true;
129 data = ast_read32(ast, 0xf004);
130 if (data != 0xFFFFFFFF)
131 ast->DisableP2A = false;
132
127 /* Check if we support wide screen */ 133 /* Check if we support wide screen */
128 switch (ast->chip) { 134 switch (ast->chip) {
129 case AST1180: 135 case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
140 ast->support_wide_screen = true; 146 ast->support_wide_screen = true;
141 else { 147 else {
142 ast->support_wide_screen = false; 148 ast->support_wide_screen = false;
143 /* Read SCU7c (silicon revision register) */ 149 if (ast->DisableP2A == false) {
144 ast_write32(ast, 0xf004, 0x1e6e0000); 150 /* Read SCU7c (silicon revision register) */
145 ast_write32(ast, 0xf000, 0x1); 151 ast_write32(ast, 0xf004, 0x1e6e0000);
146 data = ast_read32(ast, 0x1207c); 152 ast_write32(ast, 0xf000, 0x1);
147 data &= 0x300; 153 data = ast_read32(ast, 0x1207c);
148 if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ 154 data &= 0x300;
149 ast->support_wide_screen = true; 155 if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
150 if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ 156 ast->support_wide_screen = true;
151 ast->support_wide_screen = true; 157 if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
158 ast->support_wide_screen = true;
159 }
152 } 160 }
153 break; 161 break;
154 } 162 }
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
216 uint32_t data, data2; 224 uint32_t data, data2;
217 uint32_t denum, num, div, ref_pll; 225 uint32_t denum, num, div, ref_pll;
218 226
219 ast_write32(ast, 0xf004, 0x1e6e0000); 227 if (ast->DisableP2A)
220 ast_write32(ast, 0xf000, 0x1); 228 {
221
222
223 ast_write32(ast, 0x10000, 0xfc600309);
224
225 do {
226 if (pci_channel_offline(dev->pdev))
227 return -EIO;
228 } while (ast_read32(ast, 0x10000) != 0x01);
229 data = ast_read32(ast, 0x10004);
230
231 if (data & 0x40)
232 ast->dram_bus_width = 16; 229 ast->dram_bus_width = 16;
230 ast->dram_type = AST_DRAM_1Gx16;
231 ast->mclk = 396;
232 }
233 else 233 else
234 ast->dram_bus_width = 32; 234 {
235 ast_write32(ast, 0xf004, 0x1e6e0000);
236 ast_write32(ast, 0xf000, 0x1);
237 data = ast_read32(ast, 0x10004);
238
239 if (data & 0x40)
240 ast->dram_bus_width = 16;
241 else
242 ast->dram_bus_width = 32;
243
244 if (ast->chip == AST2300 || ast->chip == AST2400) {
245 switch (data & 0x03) {
246 case 0:
247 ast->dram_type = AST_DRAM_512Mx16;
248 break;
249 default:
250 case 1:
251 ast->dram_type = AST_DRAM_1Gx16;
252 break;
253 case 2:
254 ast->dram_type = AST_DRAM_2Gx16;
255 break;
256 case 3:
257 ast->dram_type = AST_DRAM_4Gx16;
258 break;
259 }
260 } else {
261 switch (data & 0x0c) {
262 case 0:
263 case 4:
264 ast->dram_type = AST_DRAM_512Mx16;
265 break;
266 case 8:
267 if (data & 0x40)
268 ast->dram_type = AST_DRAM_1Gx16;
269 else
270 ast->dram_type = AST_DRAM_512Mx32;
271 break;
272 case 0xc:
273 ast->dram_type = AST_DRAM_1Gx32;
274 break;
275 }
276 }
235 277
236 if (ast->chip == AST2300 || ast->chip == AST2400) { 278 data = ast_read32(ast, 0x10120);
237 switch (data & 0x03) { 279 data2 = ast_read32(ast, 0x10170);
238 case 0: 280 if (data2 & 0x2000)
239 ast->dram_type = AST_DRAM_512Mx16; 281 ref_pll = 14318;
240 break; 282 else
241 default: 283 ref_pll = 12000;
242 case 1: 284
243 ast->dram_type = AST_DRAM_1Gx16; 285 denum = data & 0x1f;
244 break; 286 num = (data & 0x3fe0) >> 5;
245 case 2: 287 data = (data & 0xc000) >> 14;
246 ast->dram_type = AST_DRAM_2Gx16; 288 switch (data) {
247 break;
248 case 3: 289 case 3:
249 ast->dram_type = AST_DRAM_4Gx16; 290 div = 0x4;
250 break;
251 }
252 } else {
253 switch (data & 0x0c) {
254 case 0:
255 case 4:
256 ast->dram_type = AST_DRAM_512Mx16;
257 break; 291 break;
258 case 8: 292 case 2:
259 if (data & 0x40) 293 case 1:
260 ast->dram_type = AST_DRAM_1Gx16; 294 div = 0x2;
261 else
262 ast->dram_type = AST_DRAM_512Mx32;
263 break; 295 break;
264 case 0xc: 296 default:
265 ast->dram_type = AST_DRAM_1Gx32; 297 div = 0x1;
266 break; 298 break;
267 } 299 }
300 ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
268 } 301 }
269
270 data = ast_read32(ast, 0x10120);
271 data2 = ast_read32(ast, 0x10170);
272 if (data2 & 0x2000)
273 ref_pll = 14318;
274 else
275 ref_pll = 12000;
276
277 denum = data & 0x1f;
278 num = (data & 0x3fe0) >> 5;
279 data = (data & 0xc000) >> 14;
280 switch (data) {
281 case 3:
282 div = 0x4;
283 break;
284 case 2:
285 case 1:
286 div = 0x2;
287 break;
288 default:
289 div = 0x1;
290 break;
291 }
292 ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
293 return 0; 302 return 0;
294} 303}
295 304
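
All three ast hunks hang off the new DisableP2A probe: if reading 0xf004 returns all ones, the P2A (PCI-to-AHB) bridge is disabled, so the driver must not reach SCU/DRAM registers through it and instead assumes fixed defaults and skips DRAM initialization at POST time. Condensed:

    /* Condensed P2A probe and DRAM-info fallback from the hunks above. */
    ast->DisableP2A = true;
    data = ast_read32(ast, 0xf004);
    if (data != 0xFFFFFFFF)         /* the bridge answers: P2A is usable */
            ast->DisableP2A = false;

    if (ast->DisableP2A) {
            /* no register path to probe: assume safe defaults */
            ast->dram_bus_width = 16;
            ast->dram_type = AST_DRAM_1Gx16;
            ast->mclk = 396;
    } else {
            /* derive bus width, DRAM type and MCLK from the SCU as before */
    }
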
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 810c51d92b99..5331ee1df086 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
379 ast_open_key(ast); 379 ast_open_key(ast);
380 ast_set_def_ext_reg(dev); 380 ast_set_def_ext_reg(dev);
381 381
382 if (ast->chip == AST2300 || ast->chip == AST2400) 382 if (ast->DisableP2A == false)
383 ast_init_dram_2300(dev); 383 {
384 else 384 if (ast->chip == AST2300 || ast->chip == AST2400)
385 ast_init_dram_reg(dev); 385 ast_init_dram_2300(dev);
386 else
387 ast_init_dram_reg(dev);
386 388
387 ast_init_3rdtx(dev); 389 ast_init_3rdtx(dev);
390 }
391 else
392 {
393 if (ast->tx_chip_type != AST_TX_NONE)
394 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
395 }
388} 396}
389 397
390/* AST 2300 DRAM settings */ 398/* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index eb9bf8786c24..18eefdcbf1ba 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
1382 1382
1383 pm_runtime_enable(dev); 1383 pm_runtime_enable(dev);
1384 1384
1385 pm_runtime_get_sync(dev);
1385 phy_power_on(dp->phy); 1386 phy_power_on(dp->phy);
1386 1387
1387 analogix_dp_init_dp(dp); 1388 analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
1414 goto err_disable_pm_runtime; 1415 goto err_disable_pm_runtime;
1415 } 1416 }
1416 1417
1418 phy_power_off(dp->phy);
1419 pm_runtime_put(dev);
1420
1417 return 0; 1421 return 0;
1418 1422
1419err_disable_pm_runtime: 1423err_disable_pm_runtime:
1424
1425 phy_power_off(dp->phy);
1426 pm_runtime_put(dev);
1420 pm_runtime_disable(dev); 1427 pm_runtime_disable(dev);
1421 1428
1422 return ret; 1429 return ret;
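The two analogix_dp_bind() hunks take a runtime-PM reference before powering the PHY for one-time init, and drop that reference on both the success and the error path, so the device's usage count is balanced whichever way bind exits. A kernel-style sketch of the pattern, with my_hw_init() standing in for the init step (it is a placeholder, not a function from this driver):

	#include <linux/pm_runtime.h>
	#include <linux/phy/phy.h>

	static int my_hw_init(struct device *dev)	/* hypothetical init step */
	{
		return 0;
	}

	/* Balanced bring-up: the usage count and the PHY power state are
	 * the same on exit as on entry, whether the init succeeds or fails.
	 */
	static int bringup(struct device *dev, struct phy *phy)
	{
		int ret;

		pm_runtime_get_sync(dev);
		phy_power_on(phy);

		ret = my_hw_init(dev);

		phy_power_off(phy);
		pm_runtime_put(dev);
		return ret;
	}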
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 04b3c161dfae..7f4cc6e172ab 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
7 This is a KMS driver for emulated cirrus device in qemu. 7 This is a KMS driver for emulated cirrus device in qemu.
8 It is *NOT* intended for real cirrus devices. This requires 8 It is *NOT* intended for real cirrus devices. This requires
9 the modesetting userspace X.org driver. 9 the modesetting userspace X.org driver.
10
11	  Cirrus is obsolete; the hardware was designed in the '90s
12	  and can't keep up with today's needs. More background:
13 https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
14
15 Better alternatives are:
16 - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
17 - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
18	  - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 60697482b94c..fdfb1ec17e66 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
291EXPORT_SYMBOL(drm_atomic_get_crtc_state); 291EXPORT_SYMBOL(drm_atomic_get_crtc_state);
292 292
293static void set_out_fence_for_crtc(struct drm_atomic_state *state, 293static void set_out_fence_for_crtc(struct drm_atomic_state *state,
294 struct drm_crtc *crtc, s64 __user *fence_ptr) 294 struct drm_crtc *crtc, s32 __user *fence_ptr)
295{ 295{
296 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; 296 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
297} 297}
298 298
299static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, 299static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
300 struct drm_crtc *crtc) 300 struct drm_crtc *crtc)
301{ 301{
302 s64 __user *fence_ptr; 302 s32 __user *fence_ptr;
303 303
304 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; 304 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
305 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; 305 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
512 state->color_mgmt_changed |= replaced; 512 state->color_mgmt_changed |= replaced;
513 return ret; 513 return ret;
514 } else if (property == config->prop_out_fence_ptr) { 514 } else if (property == config->prop_out_fence_ptr) {
515 s64 __user *fence_ptr = u64_to_user_ptr(val); 515 s32 __user *fence_ptr = u64_to_user_ptr(val);
516 516
517 if (!fence_ptr) 517 if (!fence_ptr)
518 return 0; 518 return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
1915 */ 1915 */
1916 1916
1917struct drm_out_fence_state { 1917struct drm_out_fence_state {
1918 s64 __user *out_fence_ptr; 1918 s32 __user *out_fence_ptr;
1919 struct sync_file *sync_file; 1919 struct sync_file *sync_file;
1920 int fd; 1920 int fd;
1921}; 1921};
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
1952 return 0; 1952 return 0;
1953 1953
1954 for_each_crtc_in_state(state, crtc, crtc_state, i) { 1954 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1955 u64 __user *fence_ptr; 1955 s32 __user *fence_ptr;
1956 1956
1957 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); 1957 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
1958 1958
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
2032 } 2032 }
2033 2033
2034 for_each_crtc_in_state(state, crtc, crtc_state, i) { 2034 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2035 struct drm_pending_vblank_event *event = crtc_state->event;
2035 /* 2036 /*
2036 * TEST_ONLY and PAGE_FLIP_EVENT are mutually 2037 * Free the allocated event. drm_atomic_helper_setup_commit
2037 * exclusive, if they weren't, this code should be 2038 * can allocate an event too, so only free it if it's ours
2038 * called on success for TEST_ONLY too. 2039 * to prevent a double free in drm_atomic_state_clear.
2039 */ 2040 */
2040 if (crtc_state->event) 2041 if (event && (event->base.fence || event->base.file_priv)) {
2041 drm_event_cancel_free(dev, &crtc_state->event->base); 2042 drm_event_cancel_free(dev, &event->base);
2043 crtc_state->event = NULL;
2044 }
2042 } 2045 }
2043 2046
2044 if (!fence_state) 2047 if (!fence_state)
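The s64 to s32 change running through this file is not cosmetic: the out-fence returned to userspace is a file descriptor, a C int, and userspace hands the kernel a pointer to 32 bits of storage, so a 64-bit store would overwrite whatever the caller placed next to it. A small userspace model of the difference; the struct layout is invented for illustration, and the output assumes a little-endian machine.

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		/* userspace allocated room for an int fd, plus a neighbour */
		struct { int32_t fence_fd; int32_t other; } u = { -1, 42 };
		int fd = 7;

		/* the old s64 store: 8 bytes into 4 bytes of storage */
		int64_t wide = fd;
		memcpy(&u, &wide, sizeof(wide));
		printf("64-bit store: fd=%d other=%d (neighbour clobbered)\n",
		       u.fence_fd, u.other);

		/* the fixed s32 store leaves the neighbour alone */
		u.other = 42;
		memcpy(&u.fence_fd, &fd, sizeof(int32_t));
		printf("32-bit store: fd=%d other=%d\n", u.fence_fd, u.other);
		return 0;
	}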
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 34f757bcabae..4594477dee00 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1666 1666
1667 funcs = plane->helper_private; 1667 funcs = plane->helper_private;
1668 1668
1669 if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
1670 continue;
1671
1672 if (funcs->prepare_fb) { 1669 if (funcs->prepare_fb) {
1673 ret = funcs->prepare_fb(plane, plane_state); 1670 ret = funcs->prepare_fb(plane, plane_state);
1674 if (ret) 1671 if (ret)
@@ -1685,9 +1682,6 @@ fail:
1685 if (j >= i) 1682 if (j >= i)
1686 continue; 1683 continue;
1687 1684
1688 if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
1689 continue;
1690
1691 funcs = plane->helper_private; 1685 funcs = plane->helper_private;
1692 1686
1693 if (funcs->cleanup_fb) 1687 if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
1954 for_each_plane_in_state(old_state, plane, plane_state, i) { 1948 for_each_plane_in_state(old_state, plane, plane_state, i) {
1955 const struct drm_plane_helper_funcs *funcs; 1949 const struct drm_plane_helper_funcs *funcs;
1956 1950
1957 if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
1958 continue;
1959
1960 funcs = plane->helper_private; 1951 funcs = plane->helper_private;
1961 1952
1962 if (funcs->cleanup_fb) 1953 if (funcs->cleanup_fb)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 5a4526289392..7a7019ac9388 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
225 225
226 INIT_LIST_HEAD(&connector->probed_modes); 226 INIT_LIST_HEAD(&connector->probed_modes);
227 INIT_LIST_HEAD(&connector->modes); 227 INIT_LIST_HEAD(&connector->modes);
228 mutex_init(&connector->mutex);
228 connector->edid_blob_ptr = NULL; 229 connector->edid_blob_ptr = NULL;
229 connector->status = connector_status_unknown; 230 connector->status = connector_status_unknown;
230 231
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
359 connector->funcs->atomic_destroy_state(connector, 360 connector->funcs->atomic_destroy_state(connector,
360 connector->state); 361 connector->state);
361 362
363 mutex_destroy(&connector->mutex);
364
362 memset(connector, 0, sizeof(*connector)); 365 memset(connector, 0, sizeof(*connector));
363} 366}
364EXPORT_SYMBOL(drm_connector_cleanup); 367EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
374 */ 377 */
375int drm_connector_register(struct drm_connector *connector) 378int drm_connector_register(struct drm_connector *connector)
376{ 379{
377 int ret; 380 int ret = 0;
378 381
379 if (connector->registered) 382 if (!connector->dev->registered)
380 return 0; 383 return 0;
381 384
385 mutex_lock(&connector->mutex);
386 if (connector->registered)
387 goto unlock;
388
382 ret = drm_sysfs_connector_add(connector); 389 ret = drm_sysfs_connector_add(connector);
383 if (ret) 390 if (ret)
384 return ret; 391 goto unlock;
385 392
386 ret = drm_debugfs_connector_add(connector); 393 ret = drm_debugfs_connector_add(connector);
387 if (ret) { 394 if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
397 drm_mode_object_register(connector->dev, &connector->base); 404 drm_mode_object_register(connector->dev, &connector->base);
398 405
399 connector->registered = true; 406 connector->registered = true;
400 return 0; 407 goto unlock;
401 408
402err_debugfs: 409err_debugfs:
403 drm_debugfs_connector_remove(connector); 410 drm_debugfs_connector_remove(connector);
404err_sysfs: 411err_sysfs:
405 drm_sysfs_connector_remove(connector); 412 drm_sysfs_connector_remove(connector);
413unlock:
414 mutex_unlock(&connector->mutex);
406 return ret; 415 return ret;
407} 416}
408EXPORT_SYMBOL(drm_connector_register); 417EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
415 */ 424 */
416void drm_connector_unregister(struct drm_connector *connector) 425void drm_connector_unregister(struct drm_connector *connector)
417{ 426{
418 if (!connector->registered) 427 mutex_lock(&connector->mutex);
428 if (!connector->registered) {
429 mutex_unlock(&connector->mutex);
419 return; 430 return;
431 }
420 432
421 if (connector->funcs->early_unregister) 433 if (connector->funcs->early_unregister)
422 connector->funcs->early_unregister(connector); 434 connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
425 drm_debugfs_connector_remove(connector); 437 drm_debugfs_connector_remove(connector);
426 438
427 connector->registered = false; 439 connector->registered = false;
440 mutex_unlock(&connector->mutex);
428} 441}
429EXPORT_SYMBOL(drm_connector_unregister); 442EXPORT_SYMBOL(drm_connector_unregister);
430 443
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a525751b4559..6594b4088f11 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
745 if (ret) 745 if (ret)
746 goto err_minors; 746 goto err_minors;
747 747
748 dev->registered = true;
749
748 if (dev->driver->load) { 750 if (dev->driver->load) {
749 ret = dev->driver->load(dev, flags); 751 ret = dev->driver->load(dev, flags);
750 if (ret) 752 if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
785 787
786 drm_lastclose(dev); 788 drm_lastclose(dev);
787 789
790 dev->registered = false;
791
788 if (drm_core_check_feature(dev, DRIVER_MODESET)) 792 if (drm_core_check_feature(dev, DRIVER_MODESET))
789 drm_modeset_unregister_all(dev); 793 drm_modeset_unregister_all(dev);
790 794
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac6a35212501..e6b19bc9021a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
1460 return NULL; 1460 return NULL;
1461 1461
1462 mode->type |= DRM_MODE_TYPE_USERDEF; 1462 mode->type |= DRM_MODE_TYPE_USERDEF;
1463	/* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
1464 if (cmd->xres == 1366 && mode->hdisplay == 1368) {
1465 mode->hdisplay = 1366;
1466 mode->hsync_start--;
1467 mode->hsync_end--;
1468 drm_mode_set_name(mode);
1469 }
1463 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1470 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1464 return mode; 1471 return mode;
1465} 1472}
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ac953f037be7..cf8f0128c161 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
143 } 143 }
144 144
145 if (dev->mode_config.delayed_event) { 145 if (dev->mode_config.delayed_event) {
146 /*
147 * FIXME:
148 *
149 * Use short (1s) delay to handle the initial delayed event.
150 * This delay should not be needed, but Optimus/nouveau will
151 * fail in a mysterious way if the delayed event is handled as
152 * soon as possible like it is done in
153 * drm_helper_probe_single_connector_modes() in case the poll
154 * was enabled before.
155 */
146 poll = true; 156 poll = true;
147 delay = 0; 157 delay = HZ;
148 } 158 }
149 159
150 if (poll) 160 if (poll)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 169ac96e8f08..fe0e85b41310 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
116 struct list_head list; 116 struct list_head list;
117 bool found; 117 bool found;
118 118
119 /*
120 * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
121 * drm_mm into giving out a low IOVA after address space
122 * rollover. This needs a proper fix.
123 */
119 ret = drm_mm_insert_node_in_range(&mmu->mm, node, 124 ret = drm_mm_insert_node_in_range(&mmu->mm, node,
120 size, 0, mmu->last_iova, ~0UL, 125 size, 0, mmu->last_iova, ~0UL,
121 DRM_MM_SEARCH_DEFAULT); 126 mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
122 127
123 if (ret != -ENOSPC) 128 if (ret != -ENOSPC)
124 break; 129 break;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6ca1f3117fe8..75eeb831ed6a 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -46,7 +46,8 @@ enum decon_flag_bits {
46 BIT_CLKS_ENABLED, 46 BIT_CLKS_ENABLED,
47 BIT_IRQS_ENABLED, 47 BIT_IRQS_ENABLED,
48 BIT_WIN_UPDATED, 48 BIT_WIN_UPDATED,
49 BIT_SUSPENDED 49 BIT_SUSPENDED,
50 BIT_REQUEST_UPDATE
50}; 51};
51 52
52struct decon_context { 53struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
141 m->crtc_vsync_end = m->crtc_vsync_start + 1; 142 m->crtc_vsync_end = m->crtc_vsync_start + 1;
142 } 143 }
143 144
144 decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
145
146 /* enable clock gate */
147 val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
148 writel(val, ctx->addr + DECON_CMU);
149
150 if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) 145 if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
151 decon_setup_trigger(ctx); 146 decon_setup_trigger(ctx);
152 147
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
315 310
316 /* window enable */ 311 /* window enable */
317 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); 312 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
313 set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
318} 314}
319 315
320static void decon_disable_plane(struct exynos_drm_crtc *crtc, 316static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
327 return; 323 return;
328 324
329 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); 325 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
326 set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
330} 327}
331 328
332static void decon_atomic_flush(struct exynos_drm_crtc *crtc) 329static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
340 for (i = ctx->first_win; i < WINDOWS_NR; i++) 337 for (i = ctx->first_win; i < WINDOWS_NR; i++)
341 decon_shadow_protect_win(ctx, i, false); 338 decon_shadow_protect_win(ctx, i, false);
342 339
343 /* standalone update */ 340 if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
344 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); 341 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
345 342
346 if (ctx->out_type & IFTYPE_I80) 343 if (ctx->out_type & IFTYPE_I80)
347 set_bit(BIT_WIN_UPDATED, &ctx->flags); 344 set_bit(BIT_WIN_UPDATED, &ctx->flags);
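The decon change turns the unconditional standalone update into a request/acknowledge handshake: the plane update and disable paths record that something changed, and the flush hook applies the shared STANDALONE_UPDATE_F write only when a request was recorded, consuming it atomically. A reduced sketch of that handshake (demo_ctx keeps just the flag word from the real decon_context):

	#include <linux/bitops.h>

	struct demo_ctx {
		unsigned long flags;
	};

	#define DEMO_REQUEST_UPDATE	0

	static void demo_update_plane(struct demo_ctx *ctx)
	{
		/* ... program the window registers ... */
		set_bit(DEMO_REQUEST_UPDATE, &ctx->flags);
	}

	static void demo_atomic_flush(struct demo_ctx *ctx)
	{
		/* apply the standalone update only if a plane changed */
		if (test_and_clear_bit(DEMO_REQUEST_UPDATE, &ctx->flags))
			; /* write STANDALONE_UPDATE_F here */
	}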
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 0d41ebc4aea6..f7bce8603958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -37,13 +37,6 @@
37#include "i915_drv.h" 37#include "i915_drv.h"
38#include "gvt.h" 38#include "gvt.h"
39 39
40#define MB_TO_BYTES(mb) ((mb) << 20ULL)
41#define BYTES_TO_MB(b) ((b) >> 20ULL)
42
43#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
44#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
45#define HOST_FENCE 4
46
47static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) 40static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
48{ 41{
49 struct intel_gvt *gvt = vgpu->gvt; 42 struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
165 POSTING_READ(fence_reg_lo); 158 POSTING_READ(fence_reg_lo);
166} 159}
167 160
161static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
162{
163 int i;
164
165 for (i = 0; i < vgpu_fence_sz(vgpu); i++)
166 intel_vgpu_write_fence(vgpu, i, 0);
167}
168
168static void free_vgpu_fence(struct intel_vgpu *vgpu) 169static void free_vgpu_fence(struct intel_vgpu *vgpu)
169{ 170{
170 struct intel_gvt *gvt = vgpu->gvt; 171 struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
178 intel_runtime_pm_get(dev_priv); 179 intel_runtime_pm_get(dev_priv);
179 180
180 mutex_lock(&dev_priv->drm.struct_mutex); 181 mutex_lock(&dev_priv->drm.struct_mutex);
182 _clear_vgpu_fence(vgpu);
181 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 183 for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
182 reg = vgpu->fence.regs[i]; 184 reg = vgpu->fence.regs[i];
183 intel_vgpu_write_fence(vgpu, i, 0);
184 list_add_tail(&reg->link, 185 list_add_tail(&reg->link,
185 &dev_priv->mm.fence_list); 186 &dev_priv->mm.fence_list);
186 } 187 }
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
208 continue; 209 continue;
209 list_del(pos); 210 list_del(pos);
210 vgpu->fence.regs[i] = reg; 211 vgpu->fence.regs[i] = reg;
211 intel_vgpu_write_fence(vgpu, i, 0);
212 if (++i == vgpu_fence_sz(vgpu)) 212 if (++i == vgpu_fence_sz(vgpu))
213 break; 213 break;
214 } 214 }
215 if (i != vgpu_fence_sz(vgpu)) 215 if (i != vgpu_fence_sz(vgpu))
216 goto out_free_fence; 216 goto out_free_fence;
217 217
218 _clear_vgpu_fence(vgpu);
219
218 mutex_unlock(&dev_priv->drm.struct_mutex); 220 mutex_unlock(&dev_priv->drm.struct_mutex);
219 intel_runtime_pm_put(dev_priv); 221 intel_runtime_pm_put(dev_priv);
220 return 0; 222 return 0;
@@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
314} 316}
315 317
316/** 318/**
319 * intel_vgpu_reset_resource - reset resource state owned by a vGPU
320 * @vgpu: a vGPU
321 *
322 * This function is used to reset resource state owned by a vGPU.
323 *
324 */
325void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
326{
327 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
328
329 intel_runtime_pm_get(dev_priv);
330 _clear_vgpu_fence(vgpu);
331 intel_runtime_pm_put(dev_priv);
332}
333
334/**
317 * intel_alloc_vgpu_resource - allocate HW resource for a vGPU 335 * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
318 * @vgpu: vGPU 336 * @vgpu: vGPU
319 * @param: vGPU creation params 337 * @param: vGPU creation params
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 711c31c8d8b4..4a6a2ed65732 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
282 } 282 }
283 return 0; 283 return 0;
284} 284}
285
286/**
287 * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU
288 *
289 * @vgpu: a vGPU
290 * @primary: is the vGPU presented as primary
291 *
292 */
293void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
294 bool primary)
295{
296 struct intel_gvt *gvt = vgpu->gvt;
297 const struct intel_gvt_device_info *info = &gvt->device_info;
298 u16 *gmch_ctl;
299 int i;
300
301 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
302 info->cfg_space_size);
303
304 if (!primary) {
305 vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
306 INTEL_GVT_PCI_CLASS_VGA_OTHER;
307 vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
308 INTEL_GVT_PCI_CLASS_VGA_OTHER;
309 }
310
311	/* Show the guest that there isn't any stolen memory. */
312 gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
313 *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
314
315 intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
316 gvt_aperture_pa_base(gvt), true);
317
318 vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
319 | PCI_COMMAND_MEMORY
320 | PCI_COMMAND_MASTER);
321 /*
322	 * Clear the upper 32 bits of the BARs and let the guest assign the new values
323 */
324 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
325 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
326 memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
327
328 for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
329 vgpu->cfg_space.bar[i].size = pci_resource_len(
330 gvt->dev_priv->drm.pdev, i * 2);
331 vgpu->cfg_space.bar[i].tracked = false;
332 }
333}
334
335/**
336 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
337 *
338 * @vgpu: a vGPU
339 *
340 */
341void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
342{
343 u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
344 bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
345 INTEL_GVT_PCI_CLASS_VGA_OTHER;
346
347 if (cmd & PCI_COMMAND_MEMORY) {
348 trap_gttmmio(vgpu, false);
349 map_aperture(vgpu, false);
350 }
351
352 /**
353	 * Currently we only do such a reset when the vGPU is not
354	 * owned by any VM, so we simply restore the entire cfg
355	 * space to its default values.
356 */
357 intel_vgpu_init_cfg_space(vgpu, primary);
358}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d26a092c70e8..e4563984cb1e 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -481,7 +481,6 @@ struct parser_exec_state {
481 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) 481 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
482 482
483static unsigned long bypass_scan_mask = 0; 483static unsigned long bypass_scan_mask = 0;
484static bool bypass_batch_buffer_scan = true;
485 484
486/* ring ALL, type = 0 */ 485/* ring ALL, type = 0 */
487static struct sub_op_bits sub_op_mi[] = { 486static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
1525{ 1524{
1526 struct intel_gvt *gvt = s->vgpu->gvt; 1525 struct intel_gvt *gvt = s->vgpu->gvt;
1527 1526
1528 if (bypass_batch_buffer_scan)
1529 return 0;
1530
1531 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { 1527 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
1532 /* BDW decides privilege based on address space */ 1528 /* BDW decides privilege based on address space */
1533 if (cmd_val(s, 0) & (1 << 8)) 1529 if (cmd_val(s, 0) & (1 << 8))
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f32bb6f6495c..34083731669d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
364#define get_desc_from_elsp_dwords(ed, i) \ 364#define get_desc_from_elsp_dwords(ed, i) \
365 ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) 365 ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
366 366
367
368#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
369#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
370static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
371 unsigned long add, int gmadr_bytes)
372{
373 if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
374 return -1;
375
376 *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
377 BATCH_BUFFER_ADDR_MASK;
378 if (gmadr_bytes == 8) {
379 *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
380 add & BATCH_BUFFER_ADDR_HIGH_MASK;
381 }
382
383 return 0;
384}
385
386static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) 367static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
387{ 368{
388 int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; 369 const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
370 struct intel_shadow_bb_entry *entry_obj;
389 371
390 /* pin the gem object to ggtt */ 372 /* pin the gem object to ggtt */
391 if (!list_empty(&workload->shadow_bb)) { 373 list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
392 struct intel_shadow_bb_entry *entry_obj = 374 struct i915_vma *vma;
393 list_first_entry(&workload->shadow_bb,
394 struct intel_shadow_bb_entry,
395 list);
396 struct intel_shadow_bb_entry *temp;
397 375
398 list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, 376 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
399 list) { 377 if (IS_ERR(vma)) {
400 struct i915_vma *vma; 378 gvt_err("Cannot pin\n");
401 379 return;
402 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
403 4, 0);
404 if (IS_ERR(vma)) {
405 gvt_err("Cannot pin\n");
406 return;
407 }
408
409 /* FIXME: we are not tracking our pinned VMA leaving it
410 * up to the core to fix up the stray pin_count upon
411 * free.
412 */
413
414 /* update the relocate gma with shadow batch buffer*/
415 set_gma_to_bb_cmd(entry_obj,
416 i915_ggtt_offset(vma),
417 gmadr_bytes);
418 } 380 }
381
382 /* FIXME: we are not tracking our pinned VMA leaving it
383 * up to the core to fix up the stray pin_count upon
384 * free.
385 */
386
387 /* update the relocate gma with shadow batch buffer*/
388 entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
389 if (gmadr_bytes == 8)
390 entry_obj->bb_start_cmd_va[2] = 0;
419 } 391 }
420} 392}
421 393
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
826 INIT_LIST_HEAD(&vgpu->workload_q_head[i]); 798 INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
827 } 799 }
828 800
829 vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", 801 vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
830 sizeof(struct intel_vgpu_workload), 0, 802 sizeof(struct intel_vgpu_workload), 0,
831 SLAB_HWCACHE_ALIGN, 803 SLAB_HWCACHE_ALIGN,
832 NULL); 804 NULL);
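With bb_start_cmd_va typed as u32 * (see the scheduler.h hunk further down), the relocation above becomes plain dword indexing into the MI_BATCH_BUFFER_START command instead of hand-computed byte offsets. A userspace model of the dwords being patched; the header value and the offset here are placeholders, not values from the hardware documentation.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* dw0: command header (placeholder), dw1/dw2: address */
		uint32_t bb_start_cmd[3] = { 0xdeadbeef, 0, 0 };
		uint64_t shadow_gma = 0x00ff1000;	/* hypothetical GGTT offset */

		bb_start_cmd[1] = (uint32_t)shadow_gma;		/* low 32 bits  */
		bb_start_cmd[2] = (uint32_t)(shadow_gma >> 32);	/* high 32 bits */

		printf("dw1=0x%08x dw2=0x%08x\n", bb_start_cmd[1], bb_start_cmd[2]);
		return 0;
	}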
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6c5fdf5b2ce2..47dec4acf7ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
240static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) 240static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
241{ 241{
242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
243 u64 pte;
244 243
245#ifdef readq 244 return readq(addr);
246 pte = readq(addr);
247#else
248 pte = ioread32(addr);
249 pte |= (u64)ioread32(addr + 4) << 32;
250#endif
251 return pte;
252} 245}
253 246
254static void write_pte64(struct drm_i915_private *dev_priv, 247static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
256{ 249{
257 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 250 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
258 251
259#ifdef writeq
260 writeq(pte, addr); 252 writeq(pte, addr);
261#else 253
262 iowrite32((u32)pte, addr);
263 iowrite32(pte >> 32, addr + 4);
264#endif
265 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 254 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
266 POSTING_READ(GFX_FLSH_CNTL_GEN6); 255 POSTING_READ(GFX_FLSH_CNTL_GEN6);
267} 256}
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
1380 info->gtt_entry_size; 1369 info->gtt_entry_size;
1381 mem = kzalloc(mm->has_shadow_page_table ? 1370 mem = kzalloc(mm->has_shadow_page_table ?
1382 mm->page_table_entry_size * 2 1371 mm->page_table_entry_size * 2
1383 : mm->page_table_entry_size, 1372 : mm->page_table_entry_size, GFP_KERNEL);
1384 GFP_ATOMIC);
1385 if (!mem) 1373 if (!mem)
1386 return -ENOMEM; 1374 return -ENOMEM;
1387 mm->virtual_page_table = mem; 1375 mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1532 struct intel_vgpu_mm *mm; 1520 struct intel_vgpu_mm *mm;
1533 int ret; 1521 int ret;
1534 1522
1535 mm = kzalloc(sizeof(*mm), GFP_ATOMIC); 1523 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1536 if (!mm) { 1524 if (!mm) {
1537 ret = -ENOMEM; 1525 ret = -ENOMEM;
1538 goto fail; 1526 goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1886 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1874 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1887 int page_entry_num = GTT_PAGE_SIZE >> 1875 int page_entry_num = GTT_PAGE_SIZE >>
1888 vgpu->gvt->device_info.gtt_entry_size_shift; 1876 vgpu->gvt->device_info.gtt_entry_size_shift;
1889 struct page *scratch_pt; 1877 void *scratch_pt;
1890 unsigned long mfn; 1878 unsigned long mfn;
1891 int i; 1879 int i;
1892 void *p;
1893 1880
1894 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) 1881 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
1895 return -EINVAL; 1882 return -EINVAL;
1896 1883
1897 scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); 1884 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
1898 if (!scratch_pt) { 1885 if (!scratch_pt) {
1899 gvt_err("fail to allocate scratch page\n"); 1886 gvt_err("fail to allocate scratch page\n");
1900 return -ENOMEM; 1887 return -ENOMEM;
1901 } 1888 }
1902 1889
1903 p = kmap_atomic(scratch_pt); 1890 mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
1904 mfn = intel_gvt_hypervisor_virt_to_mfn(p);
1905 if (mfn == INTEL_GVT_INVALID_ADDR) { 1891 if (mfn == INTEL_GVT_INVALID_ADDR) {
1906 gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); 1892 gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
1907 kunmap_atomic(p); 1893 free_page((unsigned long)scratch_pt);
1908 __free_page(scratch_pt);
1909 return -EFAULT; 1894 return -EFAULT;
1910 } 1895 }
1911 gtt->scratch_pt[type].page_mfn = mfn; 1896 gtt->scratch_pt[type].page_mfn = mfn;
1912 gtt->scratch_pt[type].page = scratch_pt; 1897 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
1913 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", 1898 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
1914 vgpu->id, type, mfn); 1899 vgpu->id, type, mfn);
1915 1900
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1918 * scratch_pt[type] indicate the scratch pt/scratch page used by the 1903 * scratch_pt[type] indicate the scratch pt/scratch page used by the
1919 * 'type' pt. 1904 * 'type' pt.
1920 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by 1905 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
1921 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self 1906 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
1922 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 1907 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
1923 */ 1908 */
1924 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { 1909 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1936 se.val64 |= PPAT_CACHED_INDEX; 1921 se.val64 |= PPAT_CACHED_INDEX;
1937 1922
1938 for (i = 0; i < page_entry_num; i++) 1923 for (i = 0; i < page_entry_num; i++)
1939 ops->set_entry(p, &se, i, false, 0, vgpu); 1924 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
1940 } 1925 }
1941 1926
1942 kunmap_atomic(p);
1943
1944 return 0; 1927 return 0;
1945} 1928}
1946 1929
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2208int intel_gvt_init_gtt(struct intel_gvt *gvt) 2191int intel_gvt_init_gtt(struct intel_gvt *gvt)
2209{ 2192{
2210 int ret; 2193 int ret;
2211 void *page_addr; 2194 void *page;
2212 2195
2213 gvt_dbg_core("init gtt\n"); 2196 gvt_dbg_core("init gtt\n");
2214 2197
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
2221 return -ENODEV; 2204 return -ENODEV;
2222 } 2205 }
2223 2206
2224 gvt->gtt.scratch_ggtt_page = 2207 page = (void *)get_zeroed_page(GFP_KERNEL);
2225 alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); 2208 if (!page) {
2226 if (!gvt->gtt.scratch_ggtt_page) {
2227 gvt_err("fail to allocate scratch ggtt page\n"); 2209 gvt_err("fail to allocate scratch ggtt page\n");
2228 return -ENOMEM; 2210 return -ENOMEM;
2229 } 2211 }
2212 gvt->gtt.scratch_ggtt_page = virt_to_page(page);
2230 2213
2231 page_addr = page_address(gvt->gtt.scratch_ggtt_page); 2214 gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
2232
2233 gvt->gtt.scratch_ggtt_mfn =
2234 intel_gvt_hypervisor_virt_to_mfn(page_addr);
2235 if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { 2215 if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
2236 gvt_err("fail to translate scratch ggtt page\n"); 2216 gvt_err("fail to translate scratch ggtt page\n");
2237 __free_page(gvt->gtt.scratch_ggtt_page); 2217 __free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2297 for (offset = 0; offset < num_entries; offset++) 2277 for (offset = 0; offset < num_entries; offset++)
2298 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2278 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2299} 2279}
2280
2281/**
2282 * intel_vgpu_reset_gtt - reset all GTT-related status
2283 * @vgpu: a vGPU
2284 * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
2285 *
2286 * This function is called from the vfio core to reset all
2287 * GTT-related status, including the GGTT, PPGTT and scratch pages.
2288 *
2289 */
2290void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
2291{
2292 int i;
2293
2294 ppgtt_free_all_shadow_page(vgpu);
2295 if (!dmlr)
2296 return;
2297
2298 intel_vgpu_reset_ggtt(vgpu);
2299
2300 /* clear scratch page for security */
2301 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2302 if (vgpu->gtt.scratch_pt[i].page != NULL)
2303 memset(page_address(vgpu->gtt.scratch_pt[i].page),
2304 0, PAGE_SIZE);
2305 }
2306}
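The gtt.c hunks replace alloc_page() plus kmap_atomic() with get_zeroed_page(), which hands back a page that is already zeroed and permanently mapped in the kernel's linear map, so the address can be used directly and converted with virt_to_page() where a struct page is still needed. A kernel-style sketch of the allocation pattern, under the same sleepable-context assumption that lets these call sites use GFP_KERNEL:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Allocate one zeroed, kernel-mapped scratch page.  Returns the
	 * struct page and stores the usable virtual address in *vaddr;
	 * no kmap/kunmap pairing is required afterwards.
	 */
	static struct page *alloc_scratch_page(void **vaddr)
	{
		void *va = (void *)get_zeroed_page(GFP_KERNEL);

		if (!va)
			return NULL;

		*vaddr = va;
		return virt_to_page(va);
	}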
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b315ab3593ec..f88eb5e89bea 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
208void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); 208void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
209 209
210extern int intel_gvt_init_gtt(struct intel_gvt *gvt); 210extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
211extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
211extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); 212extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
212 213
213extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, 214extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 398877c3d2fd..e6bf5c533fbe 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
201 intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); 201 intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
202 intel_gvt_clean_vgpu_types(gvt); 202 intel_gvt_clean_vgpu_types(gvt);
203 203
204 idr_destroy(&gvt->vgpu_idr);
205
204 kfree(dev_priv->gvt); 206 kfree(dev_priv->gvt);
205 dev_priv->gvt = NULL; 207 dev_priv->gvt = NULL;
206} 208}
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
237 239
238 gvt_dbg_core("init gvt device\n"); 240 gvt_dbg_core("init gvt device\n");
239 241
242 idr_init(&gvt->vgpu_idr);
243
240 mutex_init(&gvt->lock); 244 mutex_init(&gvt->lock);
241 gvt->dev_priv = dev_priv; 245 gvt->dev_priv = dev_priv;
242 246
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
244 248
245 ret = intel_gvt_setup_mmio_info(gvt); 249 ret = intel_gvt_setup_mmio_info(gvt);
246 if (ret) 250 if (ret)
247 return ret; 251 goto out_clean_idr;
248 252
249 ret = intel_gvt_load_firmware(gvt); 253 ret = intel_gvt_load_firmware(gvt);
250 if (ret) 254 if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
313 intel_gvt_free_firmware(gvt); 317 intel_gvt_free_firmware(gvt);
314out_clean_mmio_info: 318out_clean_mmio_info:
315 intel_gvt_clean_mmio_info(gvt); 319 intel_gvt_clean_mmio_info(gvt);
320out_clean_idr:
321 idr_destroy(&gvt->vgpu_idr);
316 kfree(gvt); 322 kfree(gvt);
317 return ret; 323 return ret;
318} 324}
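The gvt.c hunk keeps the IDR lifetime symmetric: idr_init() runs first, and every exit that can happen afterwards, whether the new out_clean_idr error label or the normal teardown, ends in idr_destroy(). A reduced sketch of that symmetry, with setup_rest() as a placeholder for the remaining init steps:

	#include <linux/idr.h>

	static struct idr demo_idr;

	static int setup_rest(void)	/* placeholder for later init steps */
	{
		return 0;
	}

	static int demo_init(void)
	{
		int ret;

		idr_init(&demo_idr);

		ret = setup_rest();
		if (ret)
			goto out_clean_idr;

		return 0;

	out_clean_idr:
		idr_destroy(&demo_idr);
		return ret;
	}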
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0af17016f33f..e227caf5859e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {
323 323
324int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 324int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
325 struct intel_vgpu_creation_params *param); 325 struct intel_vgpu_creation_params *param);
326void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
326void intel_vgpu_free_resource(struct intel_vgpu *vgpu); 327void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
327void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 328void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
328 u32 fence, u64 value); 329 u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
375struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, 376struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
376 struct intel_vgpu_type *type); 377 struct intel_vgpu_type *type);
377void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); 378void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
379void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
380 unsigned int engine_mask);
378void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 381void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
379 382
380 383
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
411int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 414int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
412 unsigned long *g_index); 415 unsigned long *g_index);
413 416
417void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
418 bool primary);
419void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
420
414int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, 421int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
415 void *p_data, unsigned int bytes); 422 void *p_data, unsigned int bytes);
416 423
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
424int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); 431int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
425 432
426int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); 433int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
427int setup_vgpu_mmio(struct intel_vgpu *vgpu);
428void populate_pvinfo_page(struct intel_vgpu *vgpu); 434void populate_pvinfo_page(struct intel_vgpu *vgpu);
429 435
430struct intel_gvt_ops { 436struct intel_gvt_ops {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 522809710312..ab2ea157da4c 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
93static int new_mmio_info(struct intel_gvt *gvt, 93static int new_mmio_info(struct intel_gvt *gvt,
94 u32 offset, u32 flags, u32 size, 94 u32 offset, u32 flags, u32 size,
95 u32 addr_mask, u32 ro_mask, u32 device, 95 u32 addr_mask, u32 ro_mask, u32 device,
96 void *read, void *write) 96 int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
97 int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
97{ 98{
98 struct intel_gvt_mmio_info *info, *p; 99 struct intel_gvt_mmio_info *info, *p;
99 u32 start, end, i; 100 u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
219 default: 220 default:
220 /*should not hit here*/ 221 /*should not hit here*/
221 gvt_err("invalid forcewake offset 0x%x\n", offset); 222 gvt_err("invalid forcewake offset 0x%x\n", offset);
222 return 1; 223 return -EINVAL;
223 } 224 }
224 } else { 225 } else {
225 ack_reg_offset = FORCEWAKE_ACK_HSW_REG; 226 ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
230 return 0; 231 return 0;
231} 232}
232 233
233static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
234 void *p_data, unsigned int bytes, unsigned long bitmap)
235{
236 struct intel_gvt_workload_scheduler *scheduler =
237 &vgpu->gvt->scheduler;
238
239 vgpu->resetting = true;
240
241 intel_vgpu_stop_schedule(vgpu);
242 /*
243 * The current_vgpu will set to NULL after stopping the
244 * scheduler when the reset is triggered by current vgpu.
245 */
246 if (scheduler->current_vgpu == NULL) {
247 mutex_unlock(&vgpu->gvt->lock);
248 intel_gvt_wait_vgpu_idle(vgpu);
249 mutex_lock(&vgpu->gvt->lock);
250 }
251
252 intel_vgpu_reset_execlist(vgpu, bitmap);
253
254 /* full GPU reset */
255 if (bitmap == 0xff) {
256 mutex_unlock(&vgpu->gvt->lock);
257 intel_vgpu_clean_gtt(vgpu);
258 mutex_lock(&vgpu->gvt->lock);
259 setup_vgpu_mmio(vgpu);
260 populate_pvinfo_page(vgpu);
261 intel_vgpu_init_gtt(vgpu);
262 }
263
264 vgpu->resetting = false;
265
266 return 0;
267}
268
269static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 234static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
270 void *p_data, unsigned int bytes) 235 void *p_data, unsigned int bytes)
271{ 236{
237 unsigned int engine_mask = 0;
272 u32 data; 238 u32 data;
273 u64 bitmap = 0;
274 239
275 write_vreg(vgpu, offset, p_data, bytes); 240 write_vreg(vgpu, offset, p_data, bytes);
276 data = vgpu_vreg(vgpu, offset); 241 data = vgpu_vreg(vgpu, offset);
277 242
278 if (data & GEN6_GRDOM_FULL) { 243 if (data & GEN6_GRDOM_FULL) {
279 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); 244 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
280 bitmap = 0xff; 245 engine_mask = ALL_ENGINES;
281 } 246 } else {
282 if (data & GEN6_GRDOM_RENDER) { 247 if (data & GEN6_GRDOM_RENDER) {
283 gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); 248 gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
284 bitmap |= (1 << RCS); 249 engine_mask |= (1 << RCS);
285 } 250 }
286 if (data & GEN6_GRDOM_MEDIA) { 251 if (data & GEN6_GRDOM_MEDIA) {
287 gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); 252 gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
288 bitmap |= (1 << VCS); 253 engine_mask |= (1 << VCS);
289 } 254 }
290 if (data & GEN6_GRDOM_BLT) { 255 if (data & GEN6_GRDOM_BLT) {
291 gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); 256 gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
292 bitmap |= (1 << BCS); 257 engine_mask |= (1 << BCS);
293 } 258 }
294 if (data & GEN6_GRDOM_VECS) { 259 if (data & GEN6_GRDOM_VECS) {
295 gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); 260 gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
296 bitmap |= (1 << VECS); 261 engine_mask |= (1 << VECS);
297 } 262 }
298 if (data & GEN8_GRDOM_MEDIA2) { 263 if (data & GEN8_GRDOM_MEDIA2) {
299 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); 264 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
300 if (HAS_BSD2(vgpu->gvt->dev_priv)) 265 if (HAS_BSD2(vgpu->gvt->dev_priv))
301 bitmap |= (1 << VCS2); 266 engine_mask |= (1 << VCS2);
267 }
302 } 268 }
303 return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); 269
270 intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
271
272 return 0;
304} 273}
305 274
306static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, 275static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
974 return 0; 943 return 0;
975} 944}
976 945
977static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 946static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
978 void *p_data, unsigned int bytes) 947 void *p_data, unsigned int bytes)
979{ 948{
980 u32 data; 949 u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1366static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, 1335static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
1367 unsigned int offset, void *p_data, unsigned int bytes) 1336 unsigned int offset, void *p_data, unsigned int bytes)
1368{ 1337{
1369 int rc = 0;
1370 unsigned int id = 0; 1338 unsigned int id = 0;
1371 1339
1372 write_vreg(vgpu, offset, p_data, bytes); 1340 write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
1389 id = VECS; 1357 id = VECS;
1390 break; 1358 break;
1391 default: 1359 default:
1392 rc = -EINVAL; 1360 return -EINVAL;
1393 break;
1394 } 1361 }
1395 set_bit(id, (void *)vgpu->tlb_handle_pending); 1362 set_bit(id, (void *)vgpu->tlb_handle_pending);
1396 1363
1397 return rc; 1364 return 0;
1398} 1365}
1399 1366
1400static int ring_reset_ctl_write(struct intel_vgpu *vgpu, 1367static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
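Spelling out the read/write prototypes in new_mmio_info() turns handler-signature mismatches into compile errors; with plain void * parameters, a handler declared with the wrong return type, such as the bool sbi_ctl_mmio_write also fixed in this file, is accepted silently. A reduced sketch of the difference:

	struct vgpu;	/* opaque, stands in for struct intel_vgpu */

	typedef int (*mmio_handler_t)(struct vgpu *vgpu, unsigned int offset,
				      void *p_data, unsigned int bytes);

	static int demo_write(struct vgpu *vgpu, unsigned int offset,
			      void *p_data, unsigned int bytes)
	{
		return 0;	/* a real handler would touch the vreg here */
	}

	/* Registration is now type-checked: passing a handler whose return
	 * type is bool, or whose arguments differ, no longer compiles.
	 */
	static int register_mmio(unsigned int offset,
				 mmio_handler_t read, mmio_handler_t write)
	{
		return 0;
	}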
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index faaae07ae487..3f656e3a6e5a 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
230 return NULL; 230 return NULL;
231} 231}
232 232
233static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, 233static ssize_t available_instances_show(struct kobject *kobj,
234 char *buf) 234 struct device *dev, char *buf)
235{ 235{
236 struct intel_vgpu_type *type; 236 struct intel_vgpu_type *type;
237 unsigned int num = 0; 237 unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
269 type->fence); 269 type->fence);
270} 270}
271 271
272static MDEV_TYPE_ATTR_RO(available_instance); 272static MDEV_TYPE_ATTR_RO(available_instances);
273static MDEV_TYPE_ATTR_RO(device_api); 273static MDEV_TYPE_ATTR_RO(device_api);
274static MDEV_TYPE_ATTR_RO(description); 274static MDEV_TYPE_ATTR_RO(description);
275 275
276static struct attribute *type_attrs[] = { 276static struct attribute *type_attrs[] = {
277 &mdev_type_attr_available_instance.attr, 277 &mdev_type_attr_available_instances.attr,
278 &mdev_type_attr_device_api.attr, 278 &mdev_type_attr_device_api.attr,
279 &mdev_type_attr_description.attr, 279 &mdev_type_attr_description.attr,
280 NULL, 280 NULL,
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
398 struct intel_vgpu_type *type; 398 struct intel_vgpu_type *type;
399 struct device *pdev; 399 struct device *pdev;
400 void *gvt; 400 void *gvt;
401 int ret;
401 402
402 pdev = mdev_parent_dev(mdev); 403 pdev = mdev_parent_dev(mdev);
403 gvt = kdev_to_i915(pdev)->gvt; 404 gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
406 if (!type) { 407 if (!type) {
407 gvt_err("failed to find type %s to create\n", 408 gvt_err("failed to find type %s to create\n",
408 kobject_name(kobj)); 409 kobject_name(kobj));
409 return -EINVAL; 410 ret = -EINVAL;
411 goto out;
410 } 412 }
411 413
412 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 414 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
413 if (IS_ERR_OR_NULL(vgpu)) { 415 if (IS_ERR_OR_NULL(vgpu)) {
414 gvt_err("create intel vgpu failed\n"); 416 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
415 return -EINVAL; 417 gvt_err("failed to create intel vgpu: %d\n", ret);
418 goto out;
416 } 419 }
417 420
418 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); 421 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
422 425
423 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", 426 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
424 dev_name(mdev_dev(mdev))); 427 dev_name(mdev_dev(mdev)));
425 return 0; 428 ret = 0;
429
430out:
431 return ret;
426} 432}
427 433
428static int intel_vgpu_remove(struct mdev_device *mdev) 434static int intel_vgpu_remove(struct mdev_device *mdev)
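intel_vgpu_create() now separates the two failure shapes vgpu_create() can produce: a NULL pointer (mapped to -EFAULT) and an ERR_PTR-encoded errno (unwrapped with PTR_ERR), instead of flattening both to -EINVAL. A kernel-style sketch of that decoding:

	#include <linux/err.h>

	/* Map the IS_ERR_OR_NULL() result space onto a proper errno. */
	static int decode_create_result(void *obj)
	{
		if (IS_ERR_OR_NULL(obj))
			return obj == NULL ? -EFAULT : (int)PTR_ERR(obj);
		return 0;
	}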
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 09c9450a1946..4df078bc5d04 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
125 if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) 125 if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
126 goto err; 126 goto err;
127 127
128 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
129 if (!mmio && !vgpu->mmio.disable_warn_untrack) {
130 gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
131 vgpu->id, offset, bytes, *(u32 *)p_data);
132
133 if (offset == 0x206c) {
134 gvt_err("------------------------------------------\n");
135 gvt_err("vgpu%d: likely triggers a gfx reset\n",
136 vgpu->id);
137 gvt_err("------------------------------------------\n");
138 vgpu->mmio.disable_warn_untrack = true;
139 }
140 }
141
142 if (!intel_gvt_mmio_is_unalign(gvt, offset)) { 128 if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
143 if (WARN_ON(!IS_ALIGNED(offset, bytes))) 129 if (WARN_ON(!IS_ALIGNED(offset, bytes)))
144 goto err; 130 goto err;
145 } 131 }
146 132
133 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
147 if (mmio) { 134 if (mmio) {
148 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { 135 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
149 if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) 136 if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
152 goto err; 139 goto err;
153 } 140 }
154 ret = mmio->read(vgpu, offset, p_data, bytes); 141 ret = mmio->read(vgpu, offset, p_data, bytes);
155 } else 142 } else {
156 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 143 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
157 144
145 if (!vgpu->mmio.disable_warn_untrack) {
146 gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
147 vgpu->id, offset, bytes, *(u32 *)p_data);
148
149 if (offset == 0x206c) {
150 gvt_err("------------------------------------------\n");
151 gvt_err("vgpu%d: likely triggers a gfx reset\n",
152 vgpu->id);
153 gvt_err("------------------------------------------\n");
154 vgpu->mmio.disable_warn_untrack = true;
155 }
156 }
157 }
158
158 if (ret) 159 if (ret)
159 goto err; 160 goto err;
160 161
@@ -302,3 +303,56 @@ err:
302 mutex_unlock(&gvt->lock); 303 mutex_unlock(&gvt->lock);
303 return ret; 304 return ret;
304} 305}
306
307
308/**
309 * intel_vgpu_reset_mmio - reset virtual MMIO space
310 * @vgpu: a vGPU
311 *
312 */
313void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
314{
315 struct intel_gvt *gvt = vgpu->gvt;
316 const struct intel_gvt_device_info *info = &gvt->device_info;
317
318 memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
319 memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
320
321 vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
322
323	/* set bits 0:2 (Core C-State) to C0 */
324 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
325}
326
327/**
328 * intel_vgpu_init_mmio - init MMIO space
329 * @vgpu: a vGPU
330 *
331 * Returns:
332 * Zero on success, negative error code if failed
333 */
334int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
335{
336 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
337
338 vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
339 if (!vgpu->mmio.vreg)
340 return -ENOMEM;
341
342 vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
343
344 intel_vgpu_reset_mmio(vgpu);
345
346 return 0;
347}
348
349/**
350 * intel_vgpu_clean_mmio - clean MMIO space
351 * @vgpu: a vGPU
352 *
353 */
354void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
355{
356 vfree(vgpu->mmio.vreg);
357 vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
358}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 87d5b5e366a3..3bc620f56f35 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
86 *offset; \ 86 *offset; \
87}) 87})
88 88
89int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
90void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
91void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
92
89int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); 93int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
90 94
91int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, 95int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 81cd921770c6..d9fb41ab7119 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
36 vgpu->id)) 36 vgpu->id))
37 return -EINVAL; 37 return -EINVAL;
38 38
39 vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | 39 vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
40 GFP_DMA32 | __GFP_ZERO, 40 __GFP_ZERO,
41 INTEL_GVT_OPREGION_PORDER); 41 get_order(INTEL_GVT_OPREGION_SIZE));
42 42
43 if (!vgpu_opregion(vgpu)->va) 43 if (!vgpu_opregion(vgpu)->va)
44 return -ENOMEM; 44 return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { 97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
98 map_vgpu_opregion(vgpu, false); 98 map_vgpu_opregion(vgpu, false);
99 free_pages((unsigned long)vgpu_opregion(vgpu)->va, 99 free_pages((unsigned long)vgpu_opregion(vgpu)->va,
100 INTEL_GVT_OPREGION_PORDER); 100 get_order(INTEL_GVT_OPREGION_SIZE));
101 101
102 vgpu_opregion(vgpu)->va = NULL; 102 vgpu_opregion(vgpu)->va = NULL;
103 } 103 }
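get_order() derives the page order from the byte size, so the allocation and the free stay consistent with INTEL_GVT_OPREGION_PAGES automatically, instead of relying on the hand-maintained PORDER constant the reg.h hunk below deletes. A userspace model of what get_order() computes for nonzero sizes (the kernel's real implementation lives in asm/page.h):

	#include <stdio.h>

	#define DEMO_PAGE_SIZE 4096UL

	/* Smallest order n such that DEMO_PAGE_SIZE << n covers size. */
	static unsigned int demo_get_order(unsigned long size)
	{
		unsigned int order = 0;

		while ((DEMO_PAGE_SIZE << order) < size)
			order++;
		return order;
	}

	int main(void)
	{
		/* two opregion pages -> order 1, matching the old PORDER of 1 */
		printf("order = %u\n", demo_get_order(2 * DEMO_PAGE_SIZE));
		return 0;
	}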
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 0dfe789d8f02..fbd023a16f18 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -50,8 +50,7 @@
50#define INTEL_GVT_OPREGION_PARM 0x204 50#define INTEL_GVT_OPREGION_PARM 0x204
51 51
52#define INTEL_GVT_OPREGION_PAGES 2 52#define INTEL_GVT_OPREGION_PAGES 2
53#define INTEL_GVT_OPREGION_PORDER 1 53#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
54#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
55 54
56#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) 55#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
57 56
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4db242250235..e91885dffeff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
350{ 350{
351 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 351 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
352 struct intel_vgpu_workload *workload; 352 struct intel_vgpu_workload *workload;
353 struct intel_vgpu *vgpu;
353 int event; 354 int event;
354 355
355 mutex_lock(&gvt->lock); 356 mutex_lock(&gvt->lock);
356 357
357 workload = scheduler->current_workload[ring_id]; 358 workload = scheduler->current_workload[ring_id];
359 vgpu = workload->vgpu;
358 360
359 if (!workload->status && !workload->vgpu->resetting) { 361 if (!workload->status && !vgpu->resetting) {
360 wait_event(workload->shadow_ctx_status_wq, 362 wait_event(workload->shadow_ctx_status_wq,
361 !atomic_read(&workload->shadow_ctx_active)); 363 !atomic_read(&workload->shadow_ctx_active));
362 364
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
364 366
365 for_each_set_bit(event, workload->pending_events, 367 for_each_set_bit(event, workload->pending_events,
366 INTEL_GVT_EVENT_MAX) 368 INTEL_GVT_EVENT_MAX)
367 intel_vgpu_trigger_virtual_event(workload->vgpu, 369 intel_vgpu_trigger_virtual_event(vgpu, event);
368 event);
369 } 370 }
370 371
371 gvt_dbg_sched("ring id %d complete workload %p status %d\n", 372 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
373 374
374 scheduler->current_workload[ring_id] = NULL; 375 scheduler->current_workload[ring_id] = NULL;
375 376
376 atomic_dec(&workload->vgpu->running_workload_num);
377
378 list_del_init(&workload->list); 377 list_del_init(&workload->list);
379 workload->complete(workload); 378 workload->complete(workload);
380 379
380 atomic_dec(&vgpu->running_workload_num);
381 wake_up(&scheduler->workload_complete_wq); 381 wake_up(&scheduler->workload_complete_wq);
382 mutex_unlock(&gvt->lock); 382 mutex_unlock(&gvt->lock);
383} 383}
@@ -459,11 +459,11 @@ complete:
459 gvt_dbg_sched("will complete workload %p\n, status: %d\n", 459 gvt_dbg_sched("will complete workload %p\n, status: %d\n",
460 workload, workload->status); 460 workload, workload->status);
461 461
462 complete_current_workload(gvt, ring_id);
463
464 if (workload->req) 462 if (workload->req)
465 i915_gem_request_put(fetch_and_zero(&workload->req)); 463 i915_gem_request_put(fetch_and_zero(&workload->req));
466 464
465 complete_current_workload(gvt, ring_id);
466
467 if (need_force_wake) 467 if (need_force_wake)
468 intel_uncore_forcewake_put(gvt->dev_priv, 468 intel_uncore_forcewake_put(gvt->dev_priv,
469 FORCEWAKE_ALL); 469 FORCEWAKE_ALL);
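
The scheduler.c hunks above cache workload->vgpu in a local variable before workload->complete() runs, and afterwards touch the vGPU (event triggering, the running_workload_num decrement) only through that cached pointer, since complete() may free the workload; the request reference is likewise dropped before completion. A minimal sketch of the hazard being avoided, with illustrative types:

	#include <linux/atomic.h>

	/* Illustrative types only. */
	struct example_vgpu {
		atomic_t running_workload_num;
	};

	struct example_workload {
		struct example_vgpu *vgpu;
		void (*complete)(struct example_workload *w);	/* may free w */
	};

	static void finish_workload(struct example_workload *w)
	{
		struct example_vgpu *vgpu = w->vgpu;	/* cache before complete() */

		w->complete(w);		/* w must not be dereferenced past here */
		atomic_dec(&vgpu->running_workload_num); /* safe: cached pointer */
	}
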
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3b30c28bff51..2833dfa8c9ae 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
113 struct drm_i915_gem_object *obj; 113 struct drm_i915_gem_object *obj;
114 void *va; 114 void *va;
115 unsigned long len; 115 unsigned long len;
116 void *bb_start_cmd_va; 116 u32 *bb_start_cmd_va;
117}; 117};
118 118
119#define workload_q_head(vgpu, ring_id) \ 119#define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 536d2b9d5777..7295bc8e12fb 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -35,79 +35,6 @@
35#include "gvt.h" 35#include "gvt.h"
36#include "i915_pvinfo.h" 36#include "i915_pvinfo.h"
37 37
38static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
39{
40 vfree(vgpu->mmio.vreg);
41 vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
42}
43
44int setup_vgpu_mmio(struct intel_vgpu *vgpu)
45{
46 struct intel_gvt *gvt = vgpu->gvt;
47 const struct intel_gvt_device_info *info = &gvt->device_info;
48
49 if (vgpu->mmio.vreg)
50 memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
51 else {
52 vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
53 if (!vgpu->mmio.vreg)
54 return -ENOMEM;
55 }
56
57 vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
58
59 memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
60 memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
61
62 vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
63
64 /* set the bit 0:2(Core C-State ) to C0 */
65 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
66 return 0;
67}
68
69static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
70 struct intel_vgpu_creation_params *param)
71{
72 struct intel_gvt *gvt = vgpu->gvt;
73 const struct intel_gvt_device_info *info = &gvt->device_info;
74 u16 *gmch_ctl;
75 int i;
76
77 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
78 info->cfg_space_size);
79
80 if (!param->primary) {
81 vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
82 INTEL_GVT_PCI_CLASS_VGA_OTHER;
83 vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
84 INTEL_GVT_PCI_CLASS_VGA_OTHER;
85 }
86
87 /* Show guest that there isn't any stolen memory.*/
88 gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
89 *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
90
91 intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
92 gvt_aperture_pa_base(gvt), true);
93
94 vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
95 | PCI_COMMAND_MEMORY
96 | PCI_COMMAND_MASTER);
97 /*
98 * Clear the bar upper 32bit and let guest to assign the new value
99 */
100 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
101 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
102 memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
103
104 for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
105 vgpu->cfg_space.bar[i].size = pci_resource_len(
106 gvt->dev_priv->drm.pdev, i * 2);
107 vgpu->cfg_space.bar[i].tracked = false;
108 }
109}
110
111void populate_pvinfo_page(struct intel_vgpu *vgpu) 38void populate_pvinfo_page(struct intel_vgpu *vgpu)
112{ 39{
113 /* setup the ballooning information */ 40 /* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
177 if (low_avail / min_low == 0) 104 if (low_avail / min_low == 0)
178 break; 105 break;
179 gvt->types[i].low_gm_size = min_low; 106 gvt->types[i].low_gm_size = min_low;
180 gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; 107 gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
181 gvt->types[i].fence = 4; 108 gvt->types[i].fence = 4;
182 gvt->types[i].max_instance = low_avail / min_low; 109 gvt->types[i].max_instance = low_avail / min_low;
183 gvt->types[i].avail_instance = gvt->types[i].max_instance; 110 gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
217 */ 144 */
218 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - 145 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
219 gvt->gm.vgpu_allocated_low_gm_size; 146 gvt->gm.vgpu_allocated_low_gm_size;
220 high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - 147 high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
221 gvt->gm.vgpu_allocated_high_gm_size; 148 gvt->gm.vgpu_allocated_high_gm_size;
222 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - 149 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
223 gvt->fence.vgpu_allocated_fence_num; 150 gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
268 intel_vgpu_clean_gtt(vgpu); 195 intel_vgpu_clean_gtt(vgpu);
269 intel_gvt_hypervisor_detach_vgpu(vgpu); 196 intel_gvt_hypervisor_detach_vgpu(vgpu);
270 intel_vgpu_free_resource(vgpu); 197 intel_vgpu_free_resource(vgpu);
271 clean_vgpu_mmio(vgpu); 198 intel_vgpu_clean_mmio(vgpu);
272 vfree(vgpu); 199 vfree(vgpu);
273 200
274 intel_gvt_update_vgpu_types(gvt); 201 intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
300 vgpu->gvt = gvt; 227 vgpu->gvt = gvt;
301 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); 228 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
302 229
303 setup_vgpu_cfg_space(vgpu, param); 230 intel_vgpu_init_cfg_space(vgpu, param->primary);
304 231
305 ret = setup_vgpu_mmio(vgpu); 232 ret = intel_vgpu_init_mmio(vgpu);
306 if (ret) 233 if (ret)
307 goto out_free_vgpu; 234 goto out_clean_idr;
308 235
309 ret = intel_vgpu_alloc_resource(vgpu, param); 236 ret = intel_vgpu_alloc_resource(vgpu, param);
310 if (ret) 237 if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
354out_clean_vgpu_resource: 281out_clean_vgpu_resource:
355 intel_vgpu_free_resource(vgpu); 282 intel_vgpu_free_resource(vgpu);
356out_clean_vgpu_mmio: 283out_clean_vgpu_mmio:
357 clean_vgpu_mmio(vgpu); 284 intel_vgpu_clean_mmio(vgpu);
285out_clean_idr:
286 idr_remove(&gvt->vgpu_idr, vgpu->id);
358out_free_vgpu: 287out_free_vgpu:
359 vfree(vgpu); 288 vfree(vgpu);
360 mutex_unlock(&gvt->lock); 289 mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
398} 327}
399 328
400/** 329/**
401 * intel_gvt_reset_vgpu - reset a virtual GPU 330 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
331 * @vgpu: virtual GPU
332 * @dmlr: vGPU Device Model Level Reset or GT Reset
333 * @engine_mask: engines to reset for GT reset
334 *
335 * This function is called when user wants to reset a virtual GPU through
336 * device model reset or GT reset. The caller should hold the gvt lock.
337 *
338 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
339 * the whole vGPU to default state as when it is created. This vGPU function
340 * is required both for functionary and security concerns.The ultimate goal
341 * of vGPU FLR is that reuse a vGPU instance by virtual machines. When we
342 * assign a vGPU to a virtual machine we must isse such reset first.
343 *
344 * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
345 * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
346 * Unlike the FLR, GT reset only reset particular resource of a vGPU per
347 * the reset request. Guest driver can issue a GT reset by programming the
348 * virtual GDRST register to reset specific virtual GPU engine or all
349 * engines.
350 *
351 * The parameter dev_level is to identify if we will do DMLR or GT reset.
352 * The parameter engine_mask is to specific the engines that need to be
353 * resetted. If value ALL_ENGINES is given for engine_mask, it means
354 * the caller requests a full GT reset that we will reset all virtual
355 * GPU engines. For FLR, engine_mask is ignored.
356 */
357void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
358 unsigned int engine_mask)
359{
360 struct intel_gvt *gvt = vgpu->gvt;
361 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
362
363 gvt_dbg_core("------------------------------------------\n");
364 gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
365 vgpu->id, dmlr, engine_mask);
366 vgpu->resetting = true;
367
368 intel_vgpu_stop_schedule(vgpu);
369 /*
370 * The current_vgpu will be set to NULL after stopping the
371 * scheduler when the reset is triggered by the current vgpu.
372 */
373 if (scheduler->current_vgpu == NULL) {
374 mutex_unlock(&gvt->lock);
375 intel_gvt_wait_vgpu_idle(vgpu);
376 mutex_lock(&gvt->lock);
377 }
378
379 intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
380
381 /* full GPU reset or device model level reset */
382 if (engine_mask == ALL_ENGINES || dmlr) {
383 intel_vgpu_reset_gtt(vgpu, dmlr);
384 intel_vgpu_reset_resource(vgpu);
385 intel_vgpu_reset_mmio(vgpu);
386 populate_pvinfo_page(vgpu);
387
388 if (dmlr)
389 intel_vgpu_reset_cfg_space(vgpu);
390 }
391
392 vgpu->resetting = false;
393 gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
394 gvt_dbg_core("------------------------------------------\n");
395}
396
397/**
398 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
402 * @vgpu: virtual GPU 399 * @vgpu: virtual GPU
403 * 400 *
404 * This function is called when the user wants to reset a virtual GPU. 401 * This function is called when the user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
406 */ 403 */
407void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) 404void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
408{ 405{
406 mutex_lock(&vgpu->gvt->lock);
407 intel_gvt_reset_vgpu_locked(vgpu, true, 0);
408 mutex_unlock(&vgpu->gvt->lock);
409} 409}
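
The new intel_gvt_reset_vgpu_locked()/intel_gvt_reset_vgpu() pair follows the common locked/unlocked split: the _locked variant assumes gvt->lock is held, so internal callers (e.g. a virtual GDRST write handler that already holds it) can reuse the full reset flow, while the public wrapper takes the lock itself. A hedged sketch of the shape, with illustrative names (note the real _locked function above additionally drops and retakes the lock around the idle wait):

	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);

	/* Caller must hold example_lock. */
	static void example_reset_locked(bool dmlr, unsigned int engine_mask)
	{
		lockdep_assert_held(&example_lock);
		/* ... stop scheduling, reset execlists/GTT/MMIO ... */
	}

	static void example_reset(void)
	{
		mutex_lock(&example_lock);
		example_reset_locked(true, 0);	/* device-model-level reset */
		mutex_unlock(&example_lock);
	}
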
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 445fec9c2841..b2c4a0b8a627 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
2378 2378
2379 assert_forcewakes_inactive(dev_priv); 2379 assert_forcewakes_inactive(dev_priv);
2380 2380
2381 if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) 2381 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2382 intel_hpd_poll_init(dev_priv); 2382 intel_hpd_poll_init(dev_priv);
2383 2383
2384 DRM_DEBUG_KMS("Device suspended\n"); 2384 DRM_DEBUG_KMS("Device suspended\n");
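
The one-character i915_drv.c fix above is a De Morgan bug: the intent is "neither Valleyview nor Cherryview", i.e. !(VLV || CHV) == !VLV && !CHV, but the old !VLV || !CHV is true for every device, since no device is both at once, so the poll init ran everywhere. A standalone illustration (plain userspace C, illustrative names):

	#include <stdbool.h>
	#include <stdio.h>

	/* We want to run hpd poll init only on devices that are neither
	 * VLV nor CHV. The old (!is_vlv || !is_chv) returned true for all
	 * three cases below. */
	static bool needs_hpd_poll_init(bool is_vlv, bool is_chv)
	{
		return !is_vlv && !is_chv;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       needs_hpd_poll_init(false, false),	/* 1: other platforms */
		       needs_hpd_poll_init(true, false),	/* 0: VLV */
		       needs_hpd_poll_init(false, true));	/* 0: CHV */
		return 0;
	}
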
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 243224aeabf8..8493e19b563a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1012,6 +1012,8 @@ struct intel_fbc {
1012 struct work_struct underrun_work; 1012 struct work_struct underrun_work;
1013 1013
1014 struct intel_fbc_state_cache { 1014 struct intel_fbc_state_cache {
1015 struct i915_vma *vma;
1016
1015 struct { 1017 struct {
1016 unsigned int mode_flags; 1018 unsigned int mode_flags;
1017 uint32_t hsw_bdw_pixel_rate; 1019 uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
1025 } plane; 1027 } plane;
1026 1028
1027 struct { 1029 struct {
1028 u64 ilk_ggtt_offset;
1029 uint32_t pixel_format; 1030 uint32_t pixel_format;
1030 unsigned int stride; 1031 unsigned int stride;
1031 int fence_reg;
1032 unsigned int tiling_mode;
1033 } fb; 1032 } fb;
1034 } state_cache; 1033 } state_cache;
1035 1034
1036 struct intel_fbc_reg_params { 1035 struct intel_fbc_reg_params {
1036 struct i915_vma *vma;
1037
1037 struct { 1038 struct {
1038 enum pipe pipe; 1039 enum pipe pipe;
1039 enum plane plane; 1040 enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
1041 } crtc; 1042 } crtc;
1042 1043
1043 struct { 1044 struct {
1044 u64 ggtt_offset;
1045 uint32_t pixel_format; 1045 uint32_t pixel_format;
1046 unsigned int stride; 1046 unsigned int stride;
1047 int fence_reg;
1048 } fb; 1047 } fb;
1049 1048
1050 int cfb_size; 1049 int cfb_size;
@@ -1977,6 +1976,11 @@ struct drm_i915_private {
1977 1976
1978 struct i915_frontbuffer_tracking fb_tracking; 1977 struct i915_frontbuffer_tracking fb_tracking;
1979 1978
1979 struct intel_atomic_helper {
1980 struct llist_head free_list;
1981 struct work_struct free_work;
1982 } atomic_helper;
1983
1980 u16 orig_clock; 1984 u16 orig_clock;
1981 1985
1982 bool mchbar_need_disable; 1986 bool mchbar_need_disable;
@@ -3163,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
3163 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); 3167 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
3164} 3168}
3165 3169
3166static inline unsigned long
3167i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
3168 const struct i915_ggtt_view *view)
3169{
3170 return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
3171}
3172
3173/* i915_gem_fence_reg.c */ 3170/* i915_gem_fence_reg.c */
3174int __must_check i915_vma_get_fence(struct i915_vma *vma); 3171int __must_check i915_vma_get_fence(struct i915_vma *vma);
3175int __must_check i915_vma_put_fence(struct i915_vma *vma); 3172int __must_check i915_vma_put_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dd7fc662859..4b23a7814713 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
595 struct drm_i915_gem_pwrite *args, 595 struct drm_i915_gem_pwrite *args,
596 struct drm_file *file) 596 struct drm_file *file)
597{ 597{
598 struct drm_device *dev = obj->base.dev;
599 void *vaddr = obj->phys_handle->vaddr + args->offset; 598 void *vaddr = obj->phys_handle->vaddr + args->offset;
600 char __user *user_data = u64_to_user_ptr(args->data_ptr); 599 char __user *user_data = u64_to_user_ptr(args->data_ptr);
601 int ret;
602 600
603 /* We manually control the domain here and pretend that it 601 /* We manually control the domain here and pretend that it
604 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 602 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
605 */ 603 */
606 lockdep_assert_held(&obj->base.dev->struct_mutex);
607 ret = i915_gem_object_wait(obj,
608 I915_WAIT_INTERRUPTIBLE |
609 I915_WAIT_LOCKED |
610 I915_WAIT_ALL,
611 MAX_SCHEDULE_TIMEOUT,
612 to_rps_client(file));
613 if (ret)
614 return ret;
615
616 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 604 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
617 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 605 if (copy_from_user(vaddr, user_data, args->size))
618 unsigned long unwritten; 606 return -EFAULT;
619
620 /* The physical object once assigned is fixed for the lifetime
621 * of the obj, so we can safely drop the lock and continue
622 * to access vaddr.
623 */
624 mutex_unlock(&dev->struct_mutex);
625 unwritten = copy_from_user(vaddr, user_data, args->size);
626 mutex_lock(&dev->struct_mutex);
627 if (unwritten) {
628 ret = -EFAULT;
629 goto out;
630 }
631 }
632 607
633 drm_clflush_virt_range(vaddr, args->size); 608 drm_clflush_virt_range(vaddr, args->size);
634 i915_gem_chipset_flush(to_i915(dev)); 609 i915_gem_chipset_flush(to_i915(obj->base.dev));
635 610
636out:
637 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 611 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
638 return ret; 612 return 0;
639} 613}
640 614
641void *i915_gem_object_alloc(struct drm_device *dev) 615void *i915_gem_object_alloc(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bd08814b015c..d534a316a16e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -199,6 +199,7 @@ found:
199 } 199 }
200 200
201 /* Unbinding will emit any required flushes */ 201 /* Unbinding will emit any required flushes */
202 ret = 0;
202 while (!list_empty(&eviction_list)) { 203 while (!list_empty(&eviction_list)) {
203 vma = list_first_entry(&eviction_list, 204 vma = list_first_entry(&eviction_list,
204 struct i915_vma, 205 struct i915_vma,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a792dcb902b5..e924a9516079 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
185 return ret; 185 return ret;
186 } 186 }
187 187
188 trace_i915_vma_bind(vma, bind_flags);
188 ret = vma->vm->bind_vma(vma, cache_level, bind_flags); 189 ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
189 if (ret) 190 if (ret)
190 return ret; 191 return ret;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index dbe9fb41ae53..8d3e515f27ba 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
85 85
86 __drm_atomic_helper_plane_duplicate_state(plane, state); 86 __drm_atomic_helper_plane_duplicate_state(plane, state);
87 87
88 intel_state->vma = NULL;
89
88 return state; 90 return state;
89} 91}
90 92
@@ -100,6 +102,24 @@ void
100intel_plane_destroy_state(struct drm_plane *plane, 102intel_plane_destroy_state(struct drm_plane *plane,
101 struct drm_plane_state *state) 103 struct drm_plane_state *state)
102{ 104{
105 struct i915_vma *vma;
106
107 vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
108
109 /*
110 * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
111 * We currently don't clear all planes during driver unload, so we have
112 * to be able to unpin vma here for now.
113 *
114 * Normally this can only happen during unload when kmscon is disabled
115 * and userspace doesn't attempt to set a framebuffer at all.
116 */
117 if (vma) {
118 mutex_lock(&plane->dev->struct_mutex);
119 intel_unpin_fb_vma(vma);
120 mutex_unlock(&plane->dev->struct_mutex);
121 }
122
103 drm_atomic_helper_plane_destroy_state(plane, state); 123 drm_atomic_helper_plane_destroy_state(plane, state);
104} 124}
105 125
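
The destroy-state hunk above leans on i915's fetch_and_zero() helper, which reads a pointer and nulls the field in one expression so ownership passes to the caller exactly once. A self-contained sketch of the idiom (the macro body matches the i915 definition of this era as best I recall; the surrounding types are illustrative):

	/* Reproduced from i915 for illustration. */
	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})

	struct example_vma;	/* opaque; stands in for struct i915_vma */

	struct example_plane_state {
		struct example_vma *vma;
	};

	static void example_unpin(struct example_vma *vma) { /* ... */ }

	static void example_destroy_state(struct example_plane_state *state)
	{
		/* Ownership transfer: take the pointer, leave NULL behind, so
		 * a later cleanup pass sees nothing left to unpin. */
		struct example_vma *vma = fetch_and_zero(&state->vma);

		if (vma)
			example_unpin(vma);
	}
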
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 86ecec5601d4..588470eb8d39 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
499 struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); 499 struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
500 struct edid *edid; 500 struct edid *edid;
501 struct i2c_adapter *i2c; 501 struct i2c_adapter *i2c;
502 bool ret = false;
502 503
503 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); 504 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
504 505
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
515 */ 516 */
516 if (!is_digital) { 517 if (!is_digital) {
517 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); 518 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
518 return true; 519 ret = true;
520 } else {
521 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
519 } 522 }
520
521 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
522 } else { 523 } else {
523 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); 524 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
524 } 525 }
525 526
526 kfree(edid); 527 kfree(edid);
527 528
528 return false; 529 return ret;
529} 530}
530 531
531static enum drm_connector_status 532static enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3dc8724df400..f1e4a21d4664 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2235,24 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2235 i915_vma_pin_fence(vma); 2235 i915_vma_pin_fence(vma);
2236 } 2236 }
2237 2237
2238 i915_vma_get(vma);
2238err: 2239err:
2239 intel_runtime_pm_put(dev_priv); 2240 intel_runtime_pm_put(dev_priv);
2240 return vma; 2241 return vma;
2241} 2242}
2242 2243
2243void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2244void intel_unpin_fb_vma(struct i915_vma *vma)
2244{ 2245{
2245 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2246 lockdep_assert_held(&vma->vm->dev->struct_mutex);
2246 struct i915_ggtt_view view;
2247 struct i915_vma *vma;
2248 2247
2249 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); 2248 if (WARN_ON_ONCE(!vma))
2250 2249 return;
2251 intel_fill_fb_ggtt_view(&view, fb, rotation);
2252 vma = i915_gem_object_to_ggtt(obj, &view);
2253 2250
2254 i915_vma_unpin_fence(vma); 2251 i915_vma_unpin_fence(vma);
2255 i915_gem_object_unpin_from_display_plane(vma); 2252 i915_gem_object_unpin_from_display_plane(vma);
2253 i915_vma_put(vma);
2256} 2254}
2257 2255
2258static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, 2256static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2585,8 +2583,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
2585 * We only keep the x/y offsets, so push all of the 2583 * We only keep the x/y offsets, so push all of the
2586 * gtt offset into the x/y offsets. 2584 * gtt offset into the x/y offsets.
2587 */ 2585 */
2588 _intel_adjust_tile_offset(&x, &y, tile_size, 2586 _intel_adjust_tile_offset(&x, &y,
2589 tile_width, tile_height, pitch_tiles, 2587 tile_width, tile_height,
2588 tile_size, pitch_tiles,
2590 gtt_offset_rotated * tile_size, 0); 2589 gtt_offset_rotated * tile_size, 0);
2591 2590
2592 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; 2591 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2746,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2746 struct drm_device *dev = intel_crtc->base.dev; 2745 struct drm_device *dev = intel_crtc->base.dev;
2747 struct drm_i915_private *dev_priv = to_i915(dev); 2746 struct drm_i915_private *dev_priv = to_i915(dev);
2748 struct drm_crtc *c; 2747 struct drm_crtc *c;
2749 struct intel_crtc *i;
2750 struct drm_i915_gem_object *obj; 2748 struct drm_i915_gem_object *obj;
2751 struct drm_plane *primary = intel_crtc->base.primary; 2749 struct drm_plane *primary = intel_crtc->base.primary;
2752 struct drm_plane_state *plane_state = primary->state; 2750 struct drm_plane_state *plane_state = primary->state;
@@ -2771,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2771 * an fb with another CRTC instead 2769 * an fb with another CRTC instead
2772 */ 2770 */
2773 for_each_crtc(dev, c) { 2771 for_each_crtc(dev, c) {
2774 i = to_intel_crtc(c); 2772 struct intel_plane_state *state;
2775 2773
2776 if (c == &intel_crtc->base) 2774 if (c == &intel_crtc->base)
2777 continue; 2775 continue;
2778 2776
2779 if (!i->active) 2777 if (!to_intel_crtc(c)->active)
2780 continue; 2778 continue;
2781 2779
2782 fb = c->primary->fb; 2780 state = to_intel_plane_state(c->primary->state);
2783 if (!fb) 2781 if (!state->vma)
2784 continue; 2782 continue;
2785 2783
2786 obj = intel_fb_obj(fb); 2784 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2787 if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) { 2785 fb = c->primary->fb;
2788 drm_framebuffer_reference(fb); 2786 drm_framebuffer_reference(fb);
2789 goto valid_fb; 2787 goto valid_fb;
2790 } 2788 }
@@ -2805,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2805 return; 2803 return;
2806 2804
2807valid_fb: 2805valid_fb:
2806 mutex_lock(&dev->struct_mutex);
2807 intel_state->vma =
2808 intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
2809 mutex_unlock(&dev->struct_mutex);
2810 if (IS_ERR(intel_state->vma)) {
2811 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2812 intel_crtc->pipe, PTR_ERR(intel_state->vma));
2813
2814 intel_state->vma = NULL;
2815 drm_framebuffer_unreference(fb);
2816 return;
2817 }
2818
2808 plane_state->src_x = 0; 2819 plane_state->src_x = 0;
2809 plane_state->src_y = 0; 2820 plane_state->src_y = 0;
2810 plane_state->src_w = fb->width << 16; 2821 plane_state->src_w = fb->width << 16;
@@ -2967,6 +2978,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
2967 unsigned int rotation = plane_state->base.rotation; 2978 unsigned int rotation = plane_state->base.rotation;
2968 int ret; 2979 int ret;
2969 2980
2981 if (!plane_state->base.visible)
2982 return 0;
2983
2970 /* Rotate src coordinates to match rotated GTT view */ 2984 /* Rotate src coordinates to match rotated GTT view */
2971 if (drm_rotation_90_or_270(rotation)) 2985 if (drm_rotation_90_or_270(rotation))
2972 drm_rect_rotate(&plane_state->base.src, 2986 drm_rect_rotate(&plane_state->base.src,
@@ -3097,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
3097 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 3111 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
3098 if (INTEL_GEN(dev_priv) >= 4) { 3112 if (INTEL_GEN(dev_priv) >= 4) {
3099 I915_WRITE(DSPSURF(plane), 3113 I915_WRITE(DSPSURF(plane),
3100 intel_fb_gtt_offset(fb, rotation) + 3114 intel_plane_ggtt_offset(plane_state) +
3101 intel_crtc->dspaddr_offset); 3115 intel_crtc->dspaddr_offset);
3102 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 3116 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3103 I915_WRITE(DSPLINOFF(plane), linear_offset); 3117 I915_WRITE(DSPLINOFF(plane), linear_offset);
3104 } else { 3118 } else {
3105 I915_WRITE(DSPADDR(plane), 3119 I915_WRITE(DSPADDR(plane),
3106 intel_fb_gtt_offset(fb, rotation) + 3120 intel_plane_ggtt_offset(plane_state) +
3107 intel_crtc->dspaddr_offset); 3121 intel_crtc->dspaddr_offset);
3108 } 3122 }
3109 POSTING_READ(reg); 3123 POSTING_READ(reg);
@@ -3200,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
3200 3214
3201 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 3215 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
3202 I915_WRITE(DSPSURF(plane), 3216 I915_WRITE(DSPSURF(plane),
3203 intel_fb_gtt_offset(fb, rotation) + 3217 intel_plane_ggtt_offset(plane_state) +
3204 intel_crtc->dspaddr_offset); 3218 intel_crtc->dspaddr_offset);
3205 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3219 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3206 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 3220 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3223,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
3223 } 3237 }
3224} 3238}
3225 3239
3226u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
3227 unsigned int rotation)
3228{
3229 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3230 struct i915_ggtt_view view;
3231 struct i915_vma *vma;
3232
3233 intel_fill_fb_ggtt_view(&view, fb, rotation);
3234
3235 vma = i915_gem_object_to_ggtt(obj, &view);
3236 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
3237 view.type))
3238 return -1;
3239
3240 return i915_ggtt_offset(vma);
3241}
3242
3243static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3240static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3244{ 3241{
3245 struct drm_device *dev = intel_crtc->base.dev; 3242 struct drm_device *dev = intel_crtc->base.dev;
@@ -3434,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
3434 } 3431 }
3435 3432
3436 I915_WRITE(PLANE_SURF(pipe, 0), 3433 I915_WRITE(PLANE_SURF(pipe, 0),
3437 intel_fb_gtt_offset(fb, rotation) + surf_addr); 3434 intel_plane_ggtt_offset(plane_state) + surf_addr);
3438 3435
3439 POSTING_READ(PLANE_SURF(pipe, 0)); 3436 POSTING_READ(PLANE_SURF(pipe, 0));
3440} 3437}
@@ -6846,6 +6843,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6846 } 6843 }
6847 6844
6848 state = drm_atomic_state_alloc(crtc->dev); 6845 state = drm_atomic_state_alloc(crtc->dev);
6846 if (!state) {
6847 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6848 crtc->base.id, crtc->name);
6849 return;
6850 }
6851
6849 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; 6852 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
6850 6853
6851 /* Everything's already locked, -EDEADLK can't happen. */ 6854 /* Everything's already locked, -EDEADLK can't happen. */
@@ -11243,6 +11246,7 @@ found:
11243 } 11246 }
11244 11247
11245 old->restore_state = restore_state; 11248 old->restore_state = restore_state;
11249 drm_atomic_state_put(state);
11246 11250
11247 /* let the connector get through one full cycle before testing */ 11251 /* let the connector get through one full cycle before testing */
11248 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 11252 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -11522,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
11522 flush_work(&work->mmio_work); 11526 flush_work(&work->mmio_work);
11523 11527
11524 mutex_lock(&dev->struct_mutex); 11528 mutex_lock(&dev->struct_mutex);
11525 intel_unpin_fb_obj(work->old_fb, primary->state->rotation); 11529 intel_unpin_fb_vma(work->old_vma);
11526 i915_gem_object_put(work->pending_flip_obj); 11530 i915_gem_object_put(work->pending_flip_obj);
11527 mutex_unlock(&dev->struct_mutex); 11531 mutex_unlock(&dev->struct_mutex);
11528 11532
@@ -12232,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
12232 goto cleanup_pending; 12236 goto cleanup_pending;
12233 } 12237 }
12234 12238
12235 work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation); 12239 work->old_vma = to_intel_plane_state(primary->state)->vma;
12236 work->gtt_offset += intel_crtc->dspaddr_offset; 12240 to_intel_plane_state(primary->state)->vma = vma;
12241
12242 work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
12237 work->rotation = crtc->primary->state->rotation; 12243 work->rotation = crtc->primary->state->rotation;
12238 12244
12239 /* 12245 /*
@@ -12287,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
12287cleanup_request: 12293cleanup_request:
12288 i915_add_request_no_flush(request); 12294 i915_add_request_no_flush(request);
12289cleanup_unpin: 12295cleanup_unpin:
12290 intel_unpin_fb_obj(fb, crtc->primary->state->rotation); 12296 to_intel_plane_state(primary->state)->vma = work->old_vma;
12297 intel_unpin_fb_vma(vma);
12291cleanup_pending: 12298cleanup_pending:
12292 atomic_dec(&intel_crtc->unpin_work_count); 12299 atomic_dec(&intel_crtc->unpin_work_count);
12293unlock: 12300unlock:
@@ -14512,8 +14519,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
14512 break; 14519 break;
14513 14520
14514 case FENCE_FREE: 14521 case FENCE_FREE:
14515 drm_atomic_state_put(&state->base); 14522 {
14516 break; 14523 struct intel_atomic_helper *helper =
14524 &to_i915(state->base.dev)->atomic_helper;
14525
14526 if (llist_add(&state->freed, &helper->free_list))
14527 schedule_work(&helper->free_work);
14528 break;
14529 }
14517 } 14530 }
14518 14531
14519 return NOTIFY_DONE; 14532 return NOTIFY_DONE;
@@ -14774,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
14774 DRM_DEBUG_KMS("failed to pin object\n"); 14787 DRM_DEBUG_KMS("failed to pin object\n");
14775 return PTR_ERR(vma); 14788 return PTR_ERR(vma);
14776 } 14789 }
14790
14791 to_intel_plane_state(new_state)->vma = vma;
14777 } 14792 }
14778 14793
14779 return 0; 14794 return 0;
@@ -14792,19 +14807,12 @@ void
14792intel_cleanup_plane_fb(struct drm_plane *plane, 14807intel_cleanup_plane_fb(struct drm_plane *plane,
14793 struct drm_plane_state *old_state) 14808 struct drm_plane_state *old_state)
14794{ 14809{
14795 struct drm_i915_private *dev_priv = to_i915(plane->dev); 14810 struct i915_vma *vma;
14796 struct intel_plane_state *old_intel_state;
14797 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
14798 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
14799
14800 old_intel_state = to_intel_plane_state(old_state);
14801
14802 if (!obj && !old_obj)
14803 return;
14804 14811
14805 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || 14812 /* Should only be called after a successful intel_prepare_plane_fb()! */
14806 !INTEL_INFO(dev_priv)->cursor_needs_physical)) 14813 vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
14807 intel_unpin_fb_obj(old_state->fb, old_state->rotation); 14814 if (vma)
14815 intel_unpin_fb_vma(vma);
14808} 14816}
14809 14817
14810int 14818int
@@ -15146,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
15146 if (!obj) 15154 if (!obj)
15147 addr = 0; 15155 addr = 0;
15148 else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) 15156 else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
15149 addr = i915_gem_object_ggtt_offset(obj, NULL); 15157 addr = intel_plane_ggtt_offset(state);
15150 else 15158 else
15151 addr = obj->phys_handle->busaddr; 15159 addr = obj->phys_handle->busaddr;
15152 15160
@@ -16392,6 +16400,18 @@ fail:
16392 drm_modeset_acquire_fini(&ctx); 16400 drm_modeset_acquire_fini(&ctx);
16393} 16401}
16394 16402
16403static void intel_atomic_helper_free_state(struct work_struct *work)
16404{
16405 struct drm_i915_private *dev_priv =
16406 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
16407 struct intel_atomic_state *state, *next;
16408 struct llist_node *freed;
16409
16410 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
16411 llist_for_each_entry_safe(state, next, freed, freed)
16412 drm_atomic_state_put(&state->base);
16413}
16414
16395int intel_modeset_init(struct drm_device *dev) 16415int intel_modeset_init(struct drm_device *dev)
16396{ 16416{
16397 struct drm_i915_private *dev_priv = to_i915(dev); 16417 struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16411,6 +16431,9 @@ int intel_modeset_init(struct drm_device *dev)
16411 16431
16412 dev->mode_config.funcs = &intel_mode_funcs; 16432 dev->mode_config.funcs = &intel_mode_funcs;
16413 16433
16434 INIT_WORK(&dev_priv->atomic_helper.free_work,
16435 intel_atomic_helper_free_state);
16436
16414 intel_init_quirks(dev); 16437 intel_init_quirks(dev);
16415 16438
16416 intel_init_pm(dev_priv); 16439 intel_init_pm(dev_priv);
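
The atomic_helper additions above (the llist_head/work_struct pair in i915_drv.h, the llist_add() in intel_atomic_commit_ready(), the worker, and the INIT_WORK wiring) implement a lock-free deferred free: the fence callback may run in a context where dropping the state is unsafe, so it pushes the state onto a lock-free list and kicks a worker. A minimal sketch of the pattern with illustrative names; llist_add(), llist_del_all(), DECLARE_WORK() and schedule_work() are the real kernel APIs:

	#include <linux/llist.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct deferred_item {
		struct llist_node node;
		/* ... payload ... */
	};

	static LLIST_HEAD(free_list);

	static void free_worker(struct work_struct *work)
	{
		struct llist_node *freed = llist_del_all(&free_list);
		struct deferred_item *item, *next;

		llist_for_each_entry_safe(item, next, freed, node)
			kfree(item);
	}

	static DECLARE_WORK(free_work, free_worker);

	static void defer_free(struct deferred_item *item)
	{
		/* llist_add() returns true only when the list was empty, so
		 * the work is scheduled once per batch, not once per item. */
		if (llist_add(&item->node, &free_list))
			schedule_work(&free_work);
	}
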
@@ -17024,47 +17047,19 @@ void intel_display_resume(struct drm_device *dev)
17024 17047
17025 if (ret) 17048 if (ret)
17026 DRM_ERROR("Restoring old state failed with %i\n", ret); 17049 DRM_ERROR("Restoring old state failed with %i\n", ret);
17027 drm_atomic_state_put(state); 17050 if (state)
17051 drm_atomic_state_put(state);
17028} 17052}
17029 17053
17030void intel_modeset_gem_init(struct drm_device *dev) 17054void intel_modeset_gem_init(struct drm_device *dev)
17031{ 17055{
17032 struct drm_i915_private *dev_priv = to_i915(dev); 17056 struct drm_i915_private *dev_priv = to_i915(dev);
17033 struct drm_crtc *c;
17034 struct drm_i915_gem_object *obj;
17035 17057
17036 intel_init_gt_powersave(dev_priv); 17058 intel_init_gt_powersave(dev_priv);
17037 17059
17038 intel_modeset_init_hw(dev); 17060 intel_modeset_init_hw(dev);
17039 17061
17040 intel_setup_overlay(dev_priv); 17062 intel_setup_overlay(dev_priv);
17041
17042 /*
17043 * Make sure any fbs we allocated at startup are properly
17044 * pinned & fenced. When we do the allocation it's too early
17045 * for this.
17046 */
17047 for_each_crtc(dev, c) {
17048 struct i915_vma *vma;
17049
17050 obj = intel_fb_obj(c->primary->fb);
17051 if (obj == NULL)
17052 continue;
17053
17054 mutex_lock(&dev->struct_mutex);
17055 vma = intel_pin_and_fence_fb_obj(c->primary->fb,
17056 c->primary->state->rotation);
17057 mutex_unlock(&dev->struct_mutex);
17058 if (IS_ERR(vma)) {
17059 DRM_ERROR("failed to pin boot fb on pipe %d\n",
17060 to_intel_crtc(c)->pipe);
17061 drm_framebuffer_unreference(c->primary->fb);
17062 c->primary->fb = NULL;
17063 c->primary->crtc = c->primary->state->crtc = NULL;
17064 update_state_fb(c->primary);
17065 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
17066 }
17067 }
17068} 17063}
17069 17064
17070int intel_connector_register(struct drm_connector *connector) 17065int intel_connector_register(struct drm_connector *connector)
@@ -17094,6 +17089,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
17094{ 17089{
17095 struct drm_i915_private *dev_priv = to_i915(dev); 17090 struct drm_i915_private *dev_priv = to_i915(dev);
17096 17091
17092 flush_work(&dev_priv->atomic_helper.free_work);
17093 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
17094
17097 intel_disable_gt_powersave(dev_priv); 17095 intel_disable_gt_powersave(dev_priv);
17098 17096
17099 /* 17097 /*
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd132c216a67..03a2112004f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -370,11 +370,14 @@ struct intel_atomic_state {
370 struct skl_wm_values wm_results; 370 struct skl_wm_values wm_results;
371 371
372 struct i915_sw_fence commit_ready; 372 struct i915_sw_fence commit_ready;
373
374 struct llist_node freed;
373}; 375};
374 376
375struct intel_plane_state { 377struct intel_plane_state {
376 struct drm_plane_state base; 378 struct drm_plane_state base;
377 struct drm_rect clip; 379 struct drm_rect clip;
380 struct i915_vma *vma;
378 381
379 struct { 382 struct {
380 u32 offset; 383 u32 offset;
@@ -1044,6 +1047,7 @@ struct intel_flip_work {
1044 struct work_struct mmio_work; 1047 struct work_struct mmio_work;
1045 1048
1046 struct drm_crtc *crtc; 1049 struct drm_crtc *crtc;
1050 struct i915_vma *old_vma;
1047 struct drm_framebuffer *old_fb; 1051 struct drm_framebuffer *old_fb;
1048 struct drm_i915_gem_object *pending_flip_obj; 1052 struct drm_i915_gem_object *pending_flip_obj;
1049 struct drm_pending_vblank_event *event; 1053 struct drm_pending_vblank_event *event;
@@ -1271,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
1271 struct drm_modeset_acquire_ctx *ctx); 1275 struct drm_modeset_acquire_ctx *ctx);
1272struct i915_vma * 1276struct i915_vma *
1273intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); 1277intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
1274void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); 1278void intel_unpin_fb_vma(struct i915_vma *vma);
1275struct drm_framebuffer * 1279struct drm_framebuffer *
1276__intel_framebuffer_create(struct drm_device *dev, 1280__intel_framebuffer_create(struct drm_device *dev,
1277 struct drm_mode_fb_cmd2 *mode_cmd, 1281 struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1360,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
1360int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); 1364int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
1361int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); 1365int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
1362 1366
1363u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation); 1367static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
1368{
1369 return i915_ggtt_offset(state->vma);
1370}
1364 1371
1365u32 skl_plane_ctl_format(uint32_t pixel_format); 1372u32 skl_plane_ctl_format(uint32_t pixel_format);
1366u32 skl_plane_ctl_tiling(uint64_t fb_modifier); 1373u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 62f215b12eb5..f3a1d6a5cabe 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
173 if (IS_I945GM(dev_priv)) 173 if (IS_I945GM(dev_priv))
174 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 174 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
175 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 175 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
176 fbc_ctl |= params->fb.fence_reg; 176 fbc_ctl |= params->vma->fence->id;
177 I915_WRITE(FBC_CONTROL, fbc_ctl); 177 I915_WRITE(FBC_CONTROL, fbc_ctl);
178} 178}
179 179
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
193 else 193 else
194 dpfc_ctl |= DPFC_CTL_LIMIT_1X; 194 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
195 195
196 if (params->fb.fence_reg != I915_FENCE_REG_NONE) { 196 if (params->vma->fence) {
197 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; 197 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
198 I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); 198 I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
199 } else { 199 } else {
200 I915_WRITE(DPFC_FENCE_YOFF, 0); 200 I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
251 break; 251 break;
252 } 252 }
253 253
254 if (params->fb.fence_reg != I915_FENCE_REG_NONE) { 254 if (params->vma->fence) {
255 dpfc_ctl |= DPFC_CTL_FENCE_EN; 255 dpfc_ctl |= DPFC_CTL_FENCE_EN;
256 if (IS_GEN5(dev_priv)) 256 if (IS_GEN5(dev_priv))
257 dpfc_ctl |= params->fb.fence_reg; 257 dpfc_ctl |= params->vma->fence->id;
258 if (IS_GEN6(dev_priv)) { 258 if (IS_GEN6(dev_priv)) {
259 I915_WRITE(SNB_DPFC_CTL_SA, 259 I915_WRITE(SNB_DPFC_CTL_SA,
260 SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); 260 SNB_CPU_FENCE_ENABLE |
261 params->vma->fence->id);
261 I915_WRITE(DPFC_CPU_FENCE_OFFSET, 262 I915_WRITE(DPFC_CPU_FENCE_OFFSET,
262 params->crtc.fence_y_offset); 263 params->crtc.fence_y_offset);
263 } 264 }
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
269 } 270 }
270 271
271 I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); 272 I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
272 I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); 273 I915_WRITE(ILK_FBC_RT_BASE,
274 i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
273 /* enable it... */ 275 /* enable it... */
274 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 276 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
275 277
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
319 break; 321 break;
320 } 322 }
321 323
322 if (params->fb.fence_reg != I915_FENCE_REG_NONE) { 324 if (params->vma->fence) {
323 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 325 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
324 I915_WRITE(SNB_DPFC_CTL_SA, 326 I915_WRITE(SNB_DPFC_CTL_SA,
325 SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); 327 SNB_CPU_FENCE_ENABLE |
328 params->vma->fence->id);
326 I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); 329 I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
327 } else { 330 } else {
328 I915_WRITE(SNB_DPFC_CTL_SA,0); 331 I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
727 return effective_w <= max_w && effective_h <= max_h; 730 return effective_w <= max_w && effective_h <= max_h;
728} 731}
729 732
730/* XXX replace me when we have VMA tracking for intel_plane_state */
731static int get_fence_id(struct drm_framebuffer *fb)
732{
733 struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
734
735 return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
736}
737
738static void intel_fbc_update_state_cache(struct intel_crtc *crtc, 733static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
739 struct intel_crtc_state *crtc_state, 734 struct intel_crtc_state *crtc_state,
740 struct intel_plane_state *plane_state) 735 struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
743 struct intel_fbc *fbc = &dev_priv->fbc; 738 struct intel_fbc *fbc = &dev_priv->fbc;
744 struct intel_fbc_state_cache *cache = &fbc->state_cache; 739 struct intel_fbc_state_cache *cache = &fbc->state_cache;
745 struct drm_framebuffer *fb = plane_state->base.fb; 740 struct drm_framebuffer *fb = plane_state->base.fb;
746 struct drm_i915_gem_object *obj; 741
742 cache->vma = NULL;
747 743
748 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; 744 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
749 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 745 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
758 if (!cache->plane.visible) 754 if (!cache->plane.visible)
759 return; 755 return;
760 756
761 obj = intel_fb_obj(fb);
762
763 /* FIXME: We lack the proper locking here, so only run this on the
764 * platforms that need. */
765 if (IS_GEN(dev_priv, 5, 6))
766 cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
767 cache->fb.pixel_format = fb->pixel_format; 757 cache->fb.pixel_format = fb->pixel_format;
768 cache->fb.stride = fb->pitches[0]; 758 cache->fb.stride = fb->pitches[0];
769 cache->fb.fence_reg = get_fence_id(fb); 759
770 cache->fb.tiling_mode = i915_gem_object_get_tiling(obj); 760 cache->vma = plane_state->vma;
771} 761}
772 762
773static bool intel_fbc_can_activate(struct intel_crtc *crtc) 763static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
784 return false; 774 return false;
785 } 775 }
786 776
787 if (!cache->plane.visible) { 777 if (!cache->vma) {
788 fbc->no_fbc_reason = "primary plane not visible"; 778 fbc->no_fbc_reason = "primary plane not visible";
789 return false; 779 return false;
790 } 780 }
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
807 * so have no fence associated with it) due to aperture constraints 797 * so have no fence associated with it) due to aperture constraints
808 * at the time of pinning. 798 * at the time of pinning.
809 */ 799 */
810 if (cache->fb.tiling_mode != I915_TILING_X || 800 if (!cache->vma->fence) {
811 cache->fb.fence_reg == I915_FENCE_REG_NONE) {
812 fbc->no_fbc_reason = "framebuffer not tiled or fenced"; 801 fbc->no_fbc_reason = "framebuffer not tiled or fenced";
813 return false; 802 return false;
814 } 803 }
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
888 * zero. */ 877 * zero. */
889 memset(params, 0, sizeof(*params)); 878 memset(params, 0, sizeof(*params));
890 879
880 params->vma = cache->vma;
881
891 params->crtc.pipe = crtc->pipe; 882 params->crtc.pipe = crtc->pipe;
892 params->crtc.plane = crtc->plane; 883 params->crtc.plane = crtc->plane;
893 params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); 884 params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
894 885
895 params->fb.pixel_format = cache->fb.pixel_format; 886 params->fb.pixel_format = cache->fb.pixel_format;
896 params->fb.stride = cache->fb.stride; 887 params->fb.stride = cache->fb.stride;
897 params->fb.fence_reg = cache->fb.fence_reg;
898 888
899 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); 889 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
900
901 params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
902} 890}
903 891
904static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, 892static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index beb08982dc0b..f4a8c4fc57c4 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
284out_destroy_fbi: 284out_destroy_fbi:
285 drm_fb_helper_release_fbi(helper); 285 drm_fb_helper_release_fbi(helper);
286out_unpin: 286out_unpin:
287 intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); 287 intel_unpin_fb_vma(vma);
288out_unlock: 288out_unlock:
289 mutex_unlock(&dev->struct_mutex); 289 mutex_unlock(&dev->struct_mutex);
290 return ret; 290 return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
549 549
550 if (ifbdev->fb) { 550 if (ifbdev->fb) {
551 mutex_lock(&ifbdev->helper.dev->struct_mutex); 551 mutex_lock(&ifbdev->helper.dev->struct_mutex);
552 intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); 552 intel_unpin_fb_vma(ifbdev->vma);
553 mutex_unlock(&ifbdev->helper.dev->struct_mutex); 553 mutex_unlock(&ifbdev->helper.dev->struct_mutex);
554 554
555 drm_framebuffer_remove(&ifbdev->fb->base); 555 drm_framebuffer_remove(&ifbdev->fb->base);
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
742{ 742{
743 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; 743 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
744 744
745 if (!ifbdev)
746 return;
747
745 ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); 748 ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
746} 749}
747 750
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d4961fa20c73..beabc17e7c8a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
979 uint32_t *batch, 979 uint32_t *batch,
980 uint32_t index) 980 uint32_t index)
981{ 981{
982 struct drm_i915_private *dev_priv = engine->i915;
983 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 982 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
984 983
985 /*
986 * WaDisableLSQCROPERFforOCL:kbl
987 * This WA is implemented in skl_init_clock_gating() but since
988 * this batch updates GEN8_L3SQCREG4 with default value we need to
989 * set this bit here to retain the WA during flush.
990 */
991 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
992 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
993
994 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 984 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
995 MI_SRM_LRM_GLOBAL_GTT)); 985 MI_SRM_LRM_GLOBAL_GTT));
996 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 986 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index aeb637dc1fdf..91cb4c422ad5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
1095 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1095 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1096 HDC_FENCE_DEST_SLM_DISABLE); 1096 HDC_FENCE_DEST_SLM_DISABLE);
1097 1097
1098 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1099 * involving this register should also be added to WA batch as required.
1100 */
1101 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
1102 /* WaDisableLSQCROPERFforOCL:kbl */
1103 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1104 GEN8_LQSC_RO_PERF_DIS);
1105
1106 /* WaToEnableHwFixForPushConstHWBug:kbl */ 1098 /* WaToEnableHwFixForPushConstHWBug:kbl */
1107 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) 1099 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1108 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1100 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8f131a08d440..242a73e66d82 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
273 273
274 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); 274 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
275 I915_WRITE(PLANE_SURF(pipe, plane), 275 I915_WRITE(PLANE_SURF(pipe, plane),
276 intel_fb_gtt_offset(fb, rotation) + surf_addr); 276 intel_plane_ggtt_offset(plane_state) + surf_addr);
277 POSTING_READ(PLANE_SURF(pipe, plane)); 277 POSTING_READ(PLANE_SURF(pipe, plane));
278} 278}
279 279
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
458 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); 458 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
459 I915_WRITE(SPCNTR(pipe, plane), sprctl); 459 I915_WRITE(SPCNTR(pipe, plane), sprctl);
460 I915_WRITE(SPSURF(pipe, plane), 460 I915_WRITE(SPSURF(pipe, plane),
461 intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); 461 intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
462 POSTING_READ(SPSURF(pipe, plane)); 462 POSTING_READ(SPSURF(pipe, plane));
463} 463}
464 464
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
594 I915_WRITE(SPRSCALE(pipe), sprscale); 594 I915_WRITE(SPRSCALE(pipe), sprscale);
595 I915_WRITE(SPRCTL(pipe), sprctl); 595 I915_WRITE(SPRCTL(pipe), sprctl);
596 I915_WRITE(SPRSURF(pipe), 596 I915_WRITE(SPRSURF(pipe),
597 intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); 597 intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
598 POSTING_READ(SPRSURF(pipe)); 598 POSTING_READ(SPRSURF(pipe));
599} 599}
600 600
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
721 I915_WRITE(DVSSCALE(pipe), dvsscale); 721 I915_WRITE(DVSSCALE(pipe), dvsscale);
722 I915_WRITE(DVSCNTR(pipe), dvscntr); 722 I915_WRITE(DVSCNTR(pipe), dvscntr);
723 I915_WRITE(DVSSURF(pipe), 723 I915_WRITE(DVSSURF(pipe),
724 intel_fb_gtt_offset(fb, rotation) + dvssurf_offset); 724 intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
725 POSTING_READ(DVSSURF(pipe)); 725 POSTING_READ(DVSSURF(pipe));
726} 726}
727 727
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 14ff87686a36..686a580c711a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
345{ 345{
346 struct adreno_platform_config *config = pdev->dev.platform_data; 346 struct adreno_platform_config *config = pdev->dev.platform_data;
347 struct msm_gpu *gpu = &adreno_gpu->base; 347 struct msm_gpu *gpu = &adreno_gpu->base;
348 struct msm_mmu *mmu;
349 int ret; 348 int ret;
350 349
351 adreno_gpu->funcs = funcs; 350 adreno_gpu->funcs = funcs;
@@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
385 return ret; 384 return ret;
386 } 385 }
387 386
388 mmu = gpu->aspace->mmu; 387 if (gpu->aspace && gpu->aspace->mmu) {
389 if (mmu) { 388 struct msm_mmu *mmu = gpu->aspace->mmu;
390 ret = mmu->funcs->attach(mmu, iommu_ports, 389 ret = mmu->funcs->attach(mmu, iommu_ports,
391 ARRAY_SIZE(iommu_ports)); 390 ARRAY_SIZE(iommu_ports));
392 if (ret) 391 if (ret)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5f6cd8745dbc..c396d459a9d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
119 119
120static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 120static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
121{ 121{
122 int i;
123 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 122 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
124 struct drm_plane *plane;
125 struct drm_plane_state *plane_state;
126
127 for_each_plane_in_state(state, plane, plane_state, i)
128 mdp5_plane_complete_commit(plane, plane_state);
129 123
130 if (mdp5_kms->smp) 124 if (mdp5_kms->smp)
131 mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); 125 mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 17b0cc101171..cdfc63d90c7b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
104 104
105 /* assigned by crtc blender */ 105 /* assigned by crtc blender */
106 enum mdp_mixer_stage_id stage; 106 enum mdp_mixer_stage_id stage;
107
108 bool pending : 1;
109}; 107};
110#define to_mdp5_plane_state(x) \ 108#define to_mdp5_plane_state(x) \
111 container_of(x, struct mdp5_plane_state, base) 109 container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
232void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); 230void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
233 231
234uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 232uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
235void mdp5_plane_complete_commit(struct drm_plane *plane,
236 struct drm_plane_state *state);
237enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 233enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
238struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); 234struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
239 235
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index c099da7bc212..25d9d0a97156 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
179 drm_printf(p, "\tzpos=%u\n", pstate->zpos); 179 drm_printf(p, "\tzpos=%u\n", pstate->zpos);
180 drm_printf(p, "\talpha=%u\n", pstate->alpha); 180 drm_printf(p, "\talpha=%u\n", pstate->alpha);
181 drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); 181 drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
182 drm_printf(p, "\tpending=%u\n", pstate->pending);
183} 182}
184 183
185static void mdp5_plane_reset(struct drm_plane *plane) 184static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
220 if (mdp5_state && mdp5_state->base.fb) 219 if (mdp5_state && mdp5_state->base.fb)
221 drm_framebuffer_reference(mdp5_state->base.fb); 220 drm_framebuffer_reference(mdp5_state->base.fb);
222 221
223 mdp5_state->pending = false;
224
225 return &mdp5_state->base; 222 return &mdp5_state->base;
226} 223}
227 224
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
288 DBG("%s: check (%d -> %d)", plane->name, 285 DBG("%s: check (%d -> %d)", plane->name,
289 plane_enabled(old_state), plane_enabled(state)); 286 plane_enabled(old_state), plane_enabled(state));
290 287
291 /* We don't allow faster-than-vblank updates.. if we did add this
292 * some day, we would need to disallow in cases where hwpipe
293 * changes
294 */
295 if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
296 return -EBUSY;
297
298 max_width = config->hw->lm.max_width << 16; 288 max_width = config->hw->lm.max_width << 16;
299 max_height = config->hw->lm.max_height << 16; 289 max_height = config->hw->lm.max_height << 16;
300 290
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
370 struct drm_plane_state *old_state) 360 struct drm_plane_state *old_state)
371{ 361{
372 struct drm_plane_state *state = plane->state; 362 struct drm_plane_state *state = plane->state;
373 struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
374 363
375 DBG("%s: update", plane->name); 364 DBG("%s: update", plane->name);
376 365
377 mdp5_state->pending = true;
378
379 if (plane_enabled(state)) { 366 if (plane_enabled(state)) {
380 int ret; 367 int ret;
381 368
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
851 return pstate->hwpipe->flush_mask; 838 return pstate->hwpipe->flush_mask;
852} 839}
853 840
854/* called after vsync in thread context */
855void mdp5_plane_complete_commit(struct drm_plane *plane,
856 struct drm_plane_state *state)
857{
858 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
859
860 pstate->pending = false;
861}
862
863/* initialize plane */ 841/* initialize plane */
864struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) 842struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
865{ 843{
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8bc59c7e261..8098677a3916 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
294 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 294 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
295 295
296 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 296 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
297 if (!priv->aspace[id])
298 continue;
297 msm_gem_unmap_vma(priv->aspace[id], 299 msm_gem_unmap_vma(priv->aspace[id],
298 &msm_obj->domain[id], msm_obj->sgt); 300 &msm_obj->domain[id], msm_obj->sgt);
299 } 301 }
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 74856a8b8f35..e64f52464ecf 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
222 uint32_t mpllP; 222 uint32_t mpllP;
223 223
224 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); 224 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
225 mpllP = (mpllP >> 8) & 0xf;
225 if (!mpllP) 226 if (!mpllP)
226 mpllP = 4; 227 mpllP = 4;
227 228
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
232 uint32_t clock; 233 uint32_t clock;
233 234
234 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); 235 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
235 return clock; 236 return clock / 1000;
236 } 237 }
237 238
238 ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); 239 ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
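
Note: two separate fixes land in this file. In the first hunk the memory PLL post-divider is extracted from bits 11:8 of the config-space dword before the zero test; previously the whole raw dword was tested, so the fallback effectively never applied. In the second, the raw clock value is a factor of 1000 larger than the unit the caller works in, hence the division. The field extraction on its own, as an illustrative helper (the function name is invented):

static u32 nv04_read_mpll_p(void)
{
        u32 mpllP;

        pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
        mpllP = (mpllP >> 8) & 0xf;     /* bits 11:8 hold the post-divider */
        if (!mpllP)
                mpllP = 4;              /* fallback when the field is unprogrammed */
        return mpllP;
}
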
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cef08da1da4e..6a157763dfc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
411 return ret; 411 return ret;
412 412
413 /* enable polling for external displays */ 413 /* enable polling for external displays */
414 drm_kms_helper_poll_enable(dev); 414 if (!dev->mode_config.poll_enabled)
415 drm_kms_helper_poll_enable(dev);
415 416
416 /* enable hotplug interrupts */ 417 /* enable hotplug interrupts */
417 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 418 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 59348fc41c77..bc85a45f91cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
773 pci_set_master(pdev); 773 pci_set_master(pdev);
774 774
775 ret = nouveau_do_resume(drm_dev, true); 775 ret = nouveau_do_resume(drm_dev, true);
776 drm_kms_helper_poll_enable(drm_dev); 776
777 if (!drm_dev->mode_config.poll_enabled)
778 drm_kms_helper_poll_enable(drm_dev);
779
777 /* do magic */ 780 /* do magic */
778 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); 781 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
779 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); 782 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
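
Note: the same guard now appears at both call sites that enable output polling (display init earlier, runtime resume here), so polling is only switched on when it is not already running. Pulled out as a hypothetical helper:

static void nouveau_poll_enable_once(struct drm_device *dev)
{
        if (!dev->mode_config.poll_enabled)
                drm_kms_helper_poll_enable(dev);
}
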
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8d5ed5bfdacb..42c1fa53d431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -165,6 +165,8 @@ struct nouveau_drm {
165 struct backlight_device *backlight; 165 struct backlight_device *backlight;
166 struct list_head bl_connectors; 166 struct list_head bl_connectors;
167 struct work_struct hpd_work; 167 struct work_struct hpd_work;
168 struct work_struct fbcon_work;
169 int fbcon_new_state;
168#ifdef CONFIG_ACPI 170#ifdef CONFIG_ACPI
169 struct notifier_block acpi_nb; 171 struct notifier_block acpi_nb;
170#endif 172#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2f2a3dcd4ad7..fa2d0a978ccc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
470 .fb_probe = nouveau_fbcon_create, 470 .fb_probe = nouveau_fbcon_create,
471}; 471};
472 472
473static void
474nouveau_fbcon_set_suspend_work(struct work_struct *work)
475{
476 struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
477 int state = READ_ONCE(drm->fbcon_new_state);
478
479 if (state == FBINFO_STATE_RUNNING)
480 pm_runtime_get_sync(drm->dev->dev);
481
482 console_lock();
483 if (state == FBINFO_STATE_RUNNING)
484 nouveau_fbcon_accel_restore(drm->dev);
485 drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
486 if (state != FBINFO_STATE_RUNNING)
487 nouveau_fbcon_accel_save_disable(drm->dev);
488 console_unlock();
489
490 if (state == FBINFO_STATE_RUNNING) {
491 pm_runtime_mark_last_busy(drm->dev->dev);
492 pm_runtime_put_sync(drm->dev->dev);
493 }
494}
495
473void 496void
474nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 497nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
475{ 498{
476 struct nouveau_drm *drm = nouveau_drm(dev); 499 struct nouveau_drm *drm = nouveau_drm(dev);
477 if (drm->fbcon) { 500
478 console_lock(); 501 if (!drm->fbcon)
479 if (state == FBINFO_STATE_RUNNING) 502 return;
480 nouveau_fbcon_accel_restore(dev); 503
481 drm_fb_helper_set_suspend(&drm->fbcon->helper, state); 504 drm->fbcon_new_state = state;
482 if (state != FBINFO_STATE_RUNNING) 505 /* Since runtime resume can happen as a result of a sysfs operation,
483 nouveau_fbcon_accel_save_disable(dev); 506 * it's possible we already have the console locked. So handle fbcon
484 console_unlock(); 507 * init/deinit from a separate work thread
485 } 508 */
509 schedule_work(&drm->fbcon_work);
486} 510}
487 511
488int 512int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
502 return -ENOMEM; 526 return -ENOMEM;
503 527
504 drm->fbcon = fbcon; 528 drm->fbcon = fbcon;
529 INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
505 530
506 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); 531 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
507 532
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index ccdce1b4eec4..d5e58a38f160 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -99,6 +99,7 @@ struct nv84_fence_priv {
99 struct nouveau_bo *bo; 99 struct nouveau_bo *bo;
100 struct nouveau_bo *bo_gart; 100 struct nouveau_bo *bo_gart;
101 u32 *suspend; 101 u32 *suspend;
102 struct mutex mutex;
102}; 103};
103 104
104int nv84_fence_context_new(struct nouveau_channel *); 105int nv84_fence_context_new(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
index 187ecdb82002..21a5775028cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.h
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
42} 42}
43 43
44/* nouveau_led.c */ 44/* nouveau_led.c */
45#if IS_ENABLED(CONFIG_LEDS_CLASS) 45#if IS_REACHABLE(CONFIG_LEDS_CLASS)
46int nouveau_led_init(struct drm_device *dev); 46int nouveau_led_init(struct drm_device *dev);
47void nouveau_led_suspend(struct drm_device *dev); 47void nouveau_led_suspend(struct drm_device *dev);
48void nouveau_led_resume(struct drm_device *dev); 48void nouveau_led_resume(struct drm_device *dev);
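
Note: IS_ENABLED(CONFIG_LEDS_CLASS) is true for both =y and =m, so with nouveau built in while the LED class is modular, the real prototypes were selected even though the symbols cannot be linked. IS_REACHABLE() is true only when the option can actually be called from this object (built in, or modular alongside modular code), which makes the header fall back to its stub branch in the broken configuration:

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
int nouveau_led_init(struct drm_device *dev);
#else
/* LED class unreachable from this object: compile to a no-op */
static inline int nouveau_led_init(struct drm_device *dev) { return 0; }
#endif
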
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 08f9c6fa0f7f..1fba38622744 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
313 if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { 313 if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
314 /* block access to objects not created via this interface */ 314 /* block access to objects not created via this interface */
315 owner = argv->v0.owner; 315 owner = argv->v0.owner;
316 if (argv->v0.object == 0ULL) 316 if (argv->v0.object == 0ULL &&
317 argv->v0.type != NVIF_IOCTL_V0_DEL)
317 argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ 318 argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
318 else 319 else
319 argv->v0.owner = NVDRM_OBJECT_USIF; 320 argv->v0.owner = NVDRM_OBJECT_USIF;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2c2c64507661..32097fd615fd 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4052 } 4052 }
4053 } 4053 }
4054 4054
4055 for_each_crtc_in_state(state, crtc, crtc_state, i) {
4056 if (crtc->state->event)
4057 drm_crtc_vblank_get(crtc);
4058 }
4059
4055 /* Update plane(s). */ 4060 /* Update plane(s). */
4056 for_each_plane_in_state(state, plane, plane_state, i) { 4061 for_each_plane_in_state(state, plane, plane_state, i) {
4057 struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state); 4062 struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4101 drm_crtc_send_vblank_event(crtc, crtc->state->event); 4106 drm_crtc_send_vblank_event(crtc, crtc->state->event);
4102 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 4107 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4103 crtc->state->event = NULL; 4108 crtc->state->event = NULL;
4109 drm_crtc_vblank_put(crtc);
4104 } 4110 }
4105 } 4111 }
4106 4112
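
Note: the two hunks pair up. A vblank reference is taken for every CRTC that has an event armed before the update is pushed, and dropped where the event is finally sent, so vblank handling stays alive long enough to deliver the completion. The pairing in isolation (hypothetical helper names, event_lock handling omitted):

static void example_arm(struct drm_crtc *crtc)
{
        if (crtc->state->event)
                drm_crtc_vblank_get(crtc);      /* pin vblank processing */
}

static void example_complete(struct drm_crtc *crtc)
{
        drm_crtc_send_vblank_event(crtc, crtc->state->event);
        crtc->state->event = NULL;
        drm_crtc_vblank_put(crtc);              /* balance the earlier get */
}
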
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 52b87ae83e7b..f0b322bec7df 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
107 struct nv84_fence_chan *fctx = chan->fence; 107 struct nv84_fence_chan *fctx = chan->fence;
108 108
109 nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); 109 nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
110 mutex_lock(&priv->mutex);
110 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); 111 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
111 nouveau_bo_vma_del(priv->bo, &fctx->vma); 112 nouveau_bo_vma_del(priv->bo, &fctx->vma);
113 mutex_unlock(&priv->mutex);
112 nouveau_fence_context_del(&fctx->base); 114 nouveau_fence_context_del(&fctx->base);
113 chan->fence = NULL; 115 chan->fence = NULL;
114 nouveau_fence_context_free(&fctx->base); 116 nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
134 fctx->base.sync32 = nv84_fence_sync32; 136 fctx->base.sync32 = nv84_fence_sync32;
135 fctx->base.sequence = nv84_fence_read(chan); 137 fctx->base.sequence = nv84_fence_read(chan);
136 138
139 mutex_lock(&priv->mutex);
137 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); 140 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
138 if (ret == 0) { 141 if (ret == 0) {
139 ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, 142 ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
140 &fctx->vma_gart); 143 &fctx->vma_gart);
141 } 144 }
145 mutex_unlock(&priv->mutex);
142 146
143 if (ret) 147 if (ret)
144 nv84_fence_context_del(chan); 148 nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
212 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); 216 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
213 priv->base.uevent = true; 217 priv->base.uevent = true;
214 218
219 mutex_init(&priv->mutex);
220
215 /* Use VRAM if there is any ; otherwise fallback to system memory */ 221 /* Use VRAM if there is any ; otherwise fallback to system memory */
216 domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : 222 domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
217 /* 223 /*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 6f0436df0219..f8f2f16c22a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
59 ); 59 );
60 } 60 }
61 for (i = 0; i < size; i++) 61 for (i = 0; i < size; i++)
62 nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]); 62 nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
63 for (; i < 0x60; i++) 63 for (; i < 0x60; i++)
64 nvkm_wr32(device, 0x61c440 + soff, (i << 8)); 64 nvkm_wr32(device, 0x61c440 + soff, (i << 8));
65 nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003); 65 nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 567466f93cd5..0db8efbf1c2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
433 case 0x94: 433 case 0x94:
434 case 0x96: 434 case 0x96:
435 case 0x98: 435 case 0x98:
436 case 0xaa:
437 case 0xac:
438 return true; 436 return true;
439 default: 437 default:
440 break; 438 break;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 00ea0002b539..30bd4a6a9d46 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -97,9 +97,10 @@
97 * 2.46.0 - Add PFP_SYNC_ME support on evergreen 97 * 2.46.0 - Add PFP_SYNC_ME support on evergreen
98 * 2.47.0 - Add UVD_NO_OP register support 98 * 2.47.0 - Add UVD_NO_OP register support
99 * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI 99 * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
100 * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
100 */ 101 */
101#define KMS_DRIVER_MAJOR 2 102#define KMS_DRIVER_MAJOR 2
102#define KMS_DRIVER_MINOR 48 103#define KMS_DRIVER_MINOR 49
103#define KMS_DRIVER_PATCHLEVEL 0 104#define KMS_DRIVER_PATCHLEVEL 0
104int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 105int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
105int radeon_driver_unload_kms(struct drm_device *dev); 106int radeon_driver_unload_kms(struct drm_device *dev);
@@ -366,11 +367,10 @@ static void
366radeon_pci_shutdown(struct pci_dev *pdev) 367radeon_pci_shutdown(struct pci_dev *pdev)
367{ 368{
368 /* if we are running in a VM, make sure the device 369 /* if we are running in a VM, make sure the device
369 * is torn down properly on reboot/shutdown. 370 * is torn down properly on reboot/shutdown
370 * unfortunately we can't detect certain
371 * hypervisors so just do this all the time.
372 */ 371 */
373 radeon_pci_remove(pdev); 372 if (radeon_device_is_virtual())
373 radeon_pci_remove(pdev);
374} 374}
375 375
376static int radeon_pmops_suspend(struct device *dev) 376static int radeon_pmops_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0bcffd8a7bd3..96683f5b2b1b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
220 220
221 man = &rdev->mman.bdev.man[TTM_PL_VRAM]; 221 man = &rdev->mman.bdev.man[TTM_PL_VRAM];
222 222
223 args->vram_size = rdev->mc.real_vram_size; 223 args->vram_size = (u64)man->size << PAGE_SHIFT;
224 args->vram_visible = (u64)man->size << PAGE_SHIFT; 224 args->vram_visible = rdev->mc.visible_vram_size;
225 args->vram_visible -= rdev->vram_pin_size; 225 args->vram_visible -= rdev->vram_pin_size;
226 args->gart_size = rdev->mc.gtt_size; 226 args->gart_size = rdev->mc.gtt_size;
227 args->gart_size -= rdev->gart_pin_size; 227 args->gart_size -= rdev->gart_pin_size;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e8a38d296855..414776811e71 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
114MODULE_FIRMWARE("radeon/hainan_rlc.bin"); 114MODULE_FIRMWARE("radeon/hainan_rlc.bin");
115MODULE_FIRMWARE("radeon/hainan_smc.bin"); 115MODULE_FIRMWARE("radeon/hainan_smc.bin");
116MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 116MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
117MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
118
119MODULE_FIRMWARE("radeon/si58_mc.bin");
117 120
118static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); 121static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
119static void si_pcie_gen3_enable(struct radeon_device *rdev); 122static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev)
1650 int err; 1653 int err;
1651 int new_fw = 0; 1654 int new_fw = 0;
1652 bool new_smc = false; 1655 bool new_smc = false;
1656 bool si58_fw = false;
1657 bool banks2_fw = false;
1653 1658
1654 DRM_DEBUG("\n"); 1659 DRM_DEBUG("\n");
1655 1660
@@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev)
1727 ((rdev->pdev->device == 0x6660) || 1732 ((rdev->pdev->device == 0x6660) ||
1728 (rdev->pdev->device == 0x6663) || 1733 (rdev->pdev->device == 0x6663) ||
1729 (rdev->pdev->device == 0x6665) || 1734 (rdev->pdev->device == 0x6665) ||
1730 (rdev->pdev->device == 0x6667))) || 1735 (rdev->pdev->device == 0x6667))))
1731 ((rdev->pdev->revision == 0xc3) &&
1732 (rdev->pdev->device == 0x6665)))
1733 new_smc = true; 1736 new_smc = true;
1737 else if ((rdev->pdev->revision == 0xc3) &&
1738 (rdev->pdev->device == 0x6665))
1739 banks2_fw = true;
1734 new_chip_name = "hainan"; 1740 new_chip_name = "hainan";
1735 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1741 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1736 me_req_size = SI_PM4_UCODE_SIZE * 4; 1742 me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
1742 default: BUG(); 1748 default: BUG();
1743 } 1749 }
1744 1750
1751 /* this memory configuration requires special firmware */
1752 if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
1753 si58_fw = true;
1754
1745 DRM_INFO("Loading %s Microcode\n", new_chip_name); 1755 DRM_INFO("Loading %s Microcode\n", new_chip_name);
1746 1756
1747 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); 1757 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
1845 } 1855 }
1846 } 1856 }
1847 1857
1848 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); 1858 if (si58_fw)
1859 snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
1860 else
1861 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
1849 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1862 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1850 if (err) { 1863 if (err) {
1851 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); 1864 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
1876 } 1889 }
1877 } 1890 }
1878 1891
1879 if (new_smc) 1892 if (banks2_fw)
1893 snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
1894 else if (new_smc)
1880 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); 1895 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
1881 else 1896 else
1882 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); 1897 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 13ba73fd9b68..2944916f7102 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3008 (rdev->pdev->device == 0x6817) || 3008 (rdev->pdev->device == 0x6817) ||
3009 (rdev->pdev->device == 0x6806)) 3009 (rdev->pdev->device == 0x6806))
3010 max_mclk = 120000; 3010 max_mclk = 120000;
3011 } else if (rdev->family == CHIP_OLAND) {
3012 if ((rdev->pdev->revision == 0xC7) ||
3013 (rdev->pdev->revision == 0x80) ||
3014 (rdev->pdev->revision == 0x81) ||
3015 (rdev->pdev->revision == 0x83) ||
3016 (rdev->pdev->revision == 0x87) ||
3017 (rdev->pdev->device == 0x6604) ||
3018 (rdev->pdev->device == 0x6605)) {
3019 max_sclk = 75000;
3020 max_mclk = 80000;
3021 }
3022 } else if (rdev->family == CHIP_HAINAN) { 3011 } else if (rdev->family == CHIP_HAINAN) {
3023 if ((rdev->pdev->revision == 0x81) || 3012 if ((rdev->pdev->revision == 0x81) ||
3024 (rdev->pdev->revision == 0x83) || 3013 (rdev->pdev->revision == 0x83) ||
@@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3027 (rdev->pdev->device == 0x6665) || 3016 (rdev->pdev->device == 0x6665) ||
3028 (rdev->pdev->device == 0x6667)) { 3017 (rdev->pdev->device == 0x6667)) {
3029 max_sclk = 75000; 3018 max_sclk = 75000;
3030 max_mclk = 80000;
3031 } 3019 }
3032 } 3020 }
3033 /* Apply dpm quirks */ 3021 /* Apply dpm quirks */
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index a0fd3e66bc4b..7aadce1f7e7a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
839 839
840 } 840 }
841 841
842 __drm_atomic_helper_crtc_destroy_state(state); 842 drm_atomic_helper_crtc_destroy_state(crtc, state);
843} 843}
844 844
845static const struct drm_crtc_funcs vc4_crtc_funcs = { 845static const struct drm_crtc_funcs vc4_crtc_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index db920771bfb5..ab3016982466 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
594 args->shader_rec_count); 594 args->shader_rec_count);
595 struct vc4_bo *bo; 595 struct vc4_bo *bo;
596 596
597 if (uniforms_offset < shader_rec_offset || 597 if (shader_rec_offset < args->bin_cl_size ||
598 uniforms_offset < shader_rec_offset ||
598 exec_size < uniforms_offset || 599 exec_size < uniforms_offset ||
599 args->shader_rec_count >= (UINT_MAX / 600 args->shader_rec_count >= (UINT_MAX /
600 sizeof(struct vc4_shader_state)) || 601 sizeof(struct vc4_shader_state)) ||
601 temp_size < exec_size) { 602 temp_size < exec_size) {
602 DRM_ERROR("overflow in exec arguments\n"); 603 DRM_ERROR("overflow in exec arguments\n");
604 ret = -EINVAL;
603 goto fail; 605 goto fail;
604 } 606 }
605 607
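
Note: this hunk fixes two things at once: the bounds check gains the missing shader_rec_offset comparison, and the overflow branch now sets ret before jumping, closing the classic goto-cleanup hole where the function reaches fail: with ret still 0 and the caller is told the call succeeded. The bug class in miniature (standalone illustration, not driver code):

int example(int bad)
{
        int ret = 0;

        if (bad) {
                ret = -EINVAL;  /* without this line, 'fail' returns 0 */
                goto fail;
        }
        return 0;
fail:
        return ret;
}
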
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 08886a309757..5cdd003605f5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
461 } 461 }
462 462
463 ret = vc4_full_res_bounds_check(exec, *obj, surf); 463 ret = vc4_full_res_bounds_check(exec, *obj, surf);
464 if (!ret) 464 if (ret)
465 return ret; 465 return ret;
466 466
467 return 0; 467 return 0;
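
Note: the old test returned early only when the bounds check succeeded (ret == 0), then fell through to return 0 regardless, so a failing check was silently ignored; inverting the condition propagates the error. The conventional shape:

ret = vc4_full_res_bounds_check(exec, *obj, surf);
if (ret)                /* non-zero means failure: pass it up */
        return ret;
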
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index dd21f950e129..cde9f3758106 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
331 info->fbops = &virtio_gpufb_ops; 331 info->fbops = &virtio_gpufb_ops;
332 info->pixmap.flags = FB_PIXMAP_SYSTEM; 332 info->pixmap.flags = FB_PIXMAP_SYSTEM;
333 333
334 info->screen_base = obj->vmap; 334 info->screen_buffer = obj->vmap;
335 info->screen_size = obj->gem_base.size; 335 info->screen_size = obj->gem_base.size;
336 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 336 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
337 drm_fb_helper_fill_var(info, &vfbdev->helper, 337 drm_fb_helper_fill_var(info, &vfbdev->helper,
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f31a778b0851..b22d0f83f8e3 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -168,7 +168,7 @@ struct cp2112_device {
168 atomic_t xfer_avail; 168 atomic_t xfer_avail;
169 struct gpio_chip gc; 169 struct gpio_chip gc;
170 u8 *in_out_buffer; 170 u8 *in_out_buffer;
171 spinlock_t lock; 171 struct mutex lock;
172 172
173 struct gpio_desc *desc[8]; 173 struct gpio_desc *desc[8];
174 bool gpio_poll; 174 bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
186 struct cp2112_device *dev = gpiochip_get_data(chip); 186 struct cp2112_device *dev = gpiochip_get_data(chip);
187 struct hid_device *hdev = dev->hdev; 187 struct hid_device *hdev = dev->hdev;
188 u8 *buf = dev->in_out_buffer; 188 u8 *buf = dev->in_out_buffer;
189 unsigned long flags;
190 int ret; 189 int ret;
191 190
192 spin_lock_irqsave(&dev->lock, flags); 191 mutex_lock(&dev->lock);
193 192
194 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 193 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
195 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 194 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
213 ret = 0; 212 ret = 0;
214 213
215exit: 214exit:
216 spin_unlock_irqrestore(&dev->lock, flags); 215 mutex_unlock(&dev->lock);
217 return ret <= 0 ? ret : -EIO; 216 return ret < 0 ? ret : -EIO;
218} 217}
219 218
220static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 219static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
222 struct cp2112_device *dev = gpiochip_get_data(chip); 221 struct cp2112_device *dev = gpiochip_get_data(chip);
223 struct hid_device *hdev = dev->hdev; 222 struct hid_device *hdev = dev->hdev;
224 u8 *buf = dev->in_out_buffer; 223 u8 *buf = dev->in_out_buffer;
225 unsigned long flags;
226 int ret; 224 int ret;
227 225
228 spin_lock_irqsave(&dev->lock, flags); 226 mutex_lock(&dev->lock);
229 227
230 buf[0] = CP2112_GPIO_SET; 228 buf[0] = CP2112_GPIO_SET;
231 buf[1] = value ? 0xff : 0; 229 buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
237 if (ret < 0) 235 if (ret < 0)
238 hid_err(hdev, "error setting GPIO values: %d\n", ret); 236 hid_err(hdev, "error setting GPIO values: %d\n", ret);
239 237
240 spin_unlock_irqrestore(&dev->lock, flags); 238 mutex_unlock(&dev->lock);
241} 239}
242 240
243static int cp2112_gpio_get_all(struct gpio_chip *chip) 241static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
245 struct cp2112_device *dev = gpiochip_get_data(chip); 243 struct cp2112_device *dev = gpiochip_get_data(chip);
246 struct hid_device *hdev = dev->hdev; 244 struct hid_device *hdev = dev->hdev;
247 u8 *buf = dev->in_out_buffer; 245 u8 *buf = dev->in_out_buffer;
248 unsigned long flags;
249 int ret; 246 int ret;
250 247
251 spin_lock_irqsave(&dev->lock, flags); 248 mutex_lock(&dev->lock);
252 249
253 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, 250 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
254 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, 251 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
262 ret = buf[1]; 259 ret = buf[1];
263 260
264exit: 261exit:
265 spin_unlock_irqrestore(&dev->lock, flags); 262 mutex_unlock(&dev->lock);
266 263
267 return ret; 264 return ret;
268} 265}
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
284 struct cp2112_device *dev = gpiochip_get_data(chip); 281 struct cp2112_device *dev = gpiochip_get_data(chip);
285 struct hid_device *hdev = dev->hdev; 282 struct hid_device *hdev = dev->hdev;
286 u8 *buf = dev->in_out_buffer; 283 u8 *buf = dev->in_out_buffer;
287 unsigned long flags;
288 int ret; 284 int ret;
289 285
290 spin_lock_irqsave(&dev->lock, flags); 286 mutex_lock(&dev->lock);
291 287
292 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 288 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
293 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 289 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
308 goto fail; 304 goto fail;
309 } 305 }
310 306
311 spin_unlock_irqrestore(&dev->lock, flags); 307 mutex_unlock(&dev->lock);
312 308
313 /* 309 /*
314 * Set gpio value when output direction is already set, 310 * Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
319 return 0; 315 return 0;
320 316
321fail: 317fail:
322 spin_unlock_irqrestore(&dev->lock, flags); 318 mutex_unlock(&dev->lock);
323 return ret < 0 ? ret : -EIO; 319 return ret < 0 ? ret : -EIO;
324} 320}
325 321
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1235 if (!dev->in_out_buffer) 1231 if (!dev->in_out_buffer)
1236 return -ENOMEM; 1232 return -ENOMEM;
1237 1233
1238 spin_lock_init(&dev->lock); 1234 mutex_init(&dev->lock);
1239 1235
1240 ret = hid_parse(hdev); 1236 ret = hid_parse(hdev);
1241 if (ret) { 1237 if (ret) {
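
Note: every critical section in this driver issues hid_hw_raw_request(), which performs a USB transfer and can sleep; sleeping while holding spin_lock_irqsave() is a bug. These gpio_chip callbacks run in process context, so a mutex is the appropriate primitive. One converted section as a standalone sketch, with an invented helper name:

static int cp2112_gpio_config_read(struct cp2112_device *dev)
{
        int ret;

        mutex_lock(&dev->lock);         /* may sleep: process context only */
        ret = hid_hw_raw_request(dev->hdev, CP2112_GPIO_CONFIG,
                                 dev->in_out_buffer,
                                 CP2112_GPIO_CONFIG_LENGTH,
                                 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
        mutex_unlock(&dev->lock);
        return ret;
}
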
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f46f2c5117fa..350accfee8e8 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -76,6 +76,9 @@
76#define USB_VENDOR_ID_ALPS_JP 0x044E 76#define USB_VENDOR_ID_ALPS_JP 0x044E
77#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B 77#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
78 78
79#define USB_VENDOR_ID_AMI 0x046b
80#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
81
79#define USB_VENDOR_ID_ANTON 0x1130 82#define USB_VENDOR_ID_ANTON 0x1130
80#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 83#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
81 84
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c5c5fbe9d605..52026dc94d5c 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
872 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), 872 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
873 .driver_data = LG_NOGET | LG_FF4 }, 873 .driver_data = LG_NOGET | LG_FF4 },
874 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), 874 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
875 .driver_data = LG_FF2 }, 875 .driver_data = LG_NOGET | LG_FF2 },
876 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), 876 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
877 .driver_data = LG_FF3 }, 877 .driver_data = LG_FF3 },
878 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), 878 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e9d6cc7cdfc5..30a2977e2645 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -57,6 +57,7 @@ static const struct hid_blacklist {
57 { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, 57 { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
58 { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, 58 { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
59 { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS }, 59 { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
60 { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
60 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, 61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, 62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, 63 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0884dc9554fd..672145b0d8f5 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
166 wacom->id[0] = STYLUS_DEVICE_ID; 166 wacom->id[0] = STYLUS_DEVICE_ID;
167 } 167 }
168 168
169 pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1)); 169 if (prox) {
170 if (features->pressure_max > 255) 170 pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
171 pressure = (pressure << 1) | ((data[4] >> 6) & 1); 171 if (features->pressure_max > 255)
172 pressure += (features->pressure_max + 1) / 2; 172 pressure = (pressure << 1) | ((data[4] >> 6) & 1);
173 173 pressure += (features->pressure_max + 1) / 2;
174 input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14)); 174
175 input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14)); 175 input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
176 input_report_abs(input, ABS_PRESSURE, pressure); 176 input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
177 177 input_report_abs(input, ABS_PRESSURE, pressure);
178 input_report_key(input, BTN_TOUCH, data[4] & 0x08); 178
179 input_report_key(input, BTN_STYLUS, data[4] & 0x10); 179 input_report_key(input, BTN_TOUCH, data[4] & 0x08);
180 /* Only allow the stylus2 button to be reported for the pen tool. */ 180 input_report_key(input, BTN_STYLUS, data[4] & 0x10);
181 input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20)); 181 /* Only allow the stylus2 button to be reported for the pen tool. */
182 input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
183 }
182 184
183 if (!prox) 185 if (!prox)
184 wacom->id[0] = 0; 186 wacom->id[0] = 0;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index cd49cb17eb7f..308dbda700eb 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
383 return ret; 383 return ret;
384 } 384 }
385 385
386 init_cached_read_index(channel);
386 next_read_location = hv_get_next_read_location(inring_info); 387 next_read_location = hv_get_next_read_location(inring_info);
387 next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, 388 next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
388 sizeof(desc), 389 sizeof(desc),
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 686971263bef..45d6771fac8c 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
962 goto err_clk_dis; 962 goto err_clk_dis;
963 } 963 }
964 964
965 ret = i2c_add_adapter(&id->adap);
966 if (ret < 0)
967 goto err_clk_dis;
968
969 /* 965 /*
970 * Cadence I2C controller has a bug wherein it generates 966 * Cadence I2C controller has a bug wherein it generates
971 * invalid read transaction after HW timeout in master receiver mode. 967 * invalid read transaction after HW timeout in master receiver mode.
@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
975 */ 971 */
976 cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); 972 cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
977 973
974 ret = i2c_add_adapter(&id->adap);
975 if (ret < 0)
976 goto err_clk_dis;
977
978 dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", 978 dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
979 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); 979 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
980 980
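
Note: the reorder matters because i2c_add_adapter() makes the bus live immediately: client devices may probe and start transfers before probe() returns, so the timeout-workaround register has to be programmed first. The resulting order:

/* finish configuring the controller... */
cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);

/* ...and only then publish the adapter */
ret = i2c_add_adapter(&id->adap);
if (ret < 0)
        goto err_clk_dis;
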
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index c62b7cd475f8..3310f2e0dbd3 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -28,6 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/of_device.h> 30#include <linux/of_device.h>
31#include <linux/pinctrl/consumer.h>
31#include <linux/platform_device.h> 32#include <linux/platform_device.h>
32#include <linux/sched.h> 33#include <linux/sched.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
@@ -636,12 +637,31 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
636 return 0; 637 return 0;
637} 638}
638 639
640#ifdef CONFIG_PM_SLEEP
641static int lpi2c_imx_suspend(struct device *dev)
642{
643 pinctrl_pm_select_sleep_state(dev);
644
645 return 0;
646}
647
648static int lpi2c_imx_resume(struct device *dev)
649{
650 pinctrl_pm_select_default_state(dev);
651
652 return 0;
653}
654#endif
655
656static SIMPLE_DEV_PM_OPS(imx_lpi2c_pm, lpi2c_imx_suspend, lpi2c_imx_resume);
657
639static struct platform_driver lpi2c_imx_driver = { 658static struct platform_driver lpi2c_imx_driver = {
640 .probe = lpi2c_imx_probe, 659 .probe = lpi2c_imx_probe,
641 .remove = lpi2c_imx_remove, 660 .remove = lpi2c_imx_remove,
642 .driver = { 661 .driver = {
643 .name = DRIVER_NAME, 662 .name = DRIVER_NAME,
644 .of_match_table = lpi2c_imx_of_match, 663 .of_match_table = lpi2c_imx_of_match,
664 .pm = &imx_lpi2c_pm,
645 }, 665 },
646}; 666};
647 667
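
Note: SIMPLE_DEV_PM_OPS() references its callbacks only when CONFIG_PM_SLEEP is set, which is why the new handlers sit inside a matching #ifdef instead of being marked __maybe_unused. Simplified view of what the macro produces here:

static const struct dev_pm_ops imx_lpi2c_pm = {
        /* wires up .suspend/.resume only under CONFIG_PM_SLEEP */
        SET_SYSTEM_SLEEP_PM_OPS(lpi2c_imx_suspend, lpi2c_imx_resume)
};
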
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 2bbf0c521beb..7d61b566e148 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
775 775
776static int palmas_gpadc_suspend(struct device *dev) 776static int palmas_gpadc_suspend(struct device *dev)
777{ 777{
778 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 778 struct iio_dev *indio_dev = dev_get_drvdata(dev);
779 struct palmas_gpadc *adc = iio_priv(indio_dev); 779 struct palmas_gpadc *adc = iio_priv(indio_dev);
780 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; 780 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
781 int ret; 781 int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
798 798
799static int palmas_gpadc_resume(struct device *dev) 799static int palmas_gpadc_resume(struct device *dev)
800{ 800{
801 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 801 struct iio_dev *indio_dev = dev_get_drvdata(dev);
802 struct palmas_gpadc *adc = iio_priv(indio_dev); 802 struct palmas_gpadc *adc = iio_priv(indio_dev);
803 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; 803 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
804 int ret; 804 int ret;
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 9a081465c42f..6bb23a49e81e 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
422 422
423static int __maybe_unused afe4403_suspend(struct device *dev) 423static int __maybe_unused afe4403_suspend(struct device *dev)
424{ 424{
425 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 425 struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
426 struct afe4403_data *afe = iio_priv(indio_dev); 426 struct afe4403_data *afe = iio_priv(indio_dev);
427 int ret; 427 int ret;
428 428
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
443 443
444static int __maybe_unused afe4403_resume(struct device *dev) 444static int __maybe_unused afe4403_resume(struct device *dev)
445{ 445{
446 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 446 struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
447 struct afe4403_data *afe = iio_priv(indio_dev); 447 struct afe4403_data *afe = iio_priv(indio_dev);
448 int ret; 448 int ret;
449 449
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 45266404f7e3..964f5231a831 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
428 428
429static int __maybe_unused afe4404_suspend(struct device *dev) 429static int __maybe_unused afe4404_suspend(struct device *dev)
430{ 430{
431 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 431 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
432 struct afe4404_data *afe = iio_priv(indio_dev); 432 struct afe4404_data *afe = iio_priv(indio_dev);
433 int ret; 433 int ret;
434 434
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
449 449
450static int __maybe_unused afe4404_resume(struct device *dev) 450static int __maybe_unused afe4404_resume(struct device *dev)
451{ 451{
452 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 452 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
453 struct afe4404_data *afe = iio_priv(indio_dev); 453 struct afe4404_data *afe = iio_priv(indio_dev);
454 int ret; 454 int ret;
455 455
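
Note: the palmas_gpadc, afe4403 and afe4404 hunks above are the same fix three times over. In a dev_pm_ops callback, dev is the raw bus device (platform, SPI or I2C respectively), not an IIO device, so dev_to_iio_dev() manufactured a bogus pointer; the iio_dev has to come back out of the drvdata stored at probe time. The I2C flavour as a standalone sketch (the function name is invented):

static int example_suspend(struct device *dev)
{
        /* dev belongs to the i2c client; drvdata was set in probe() */
        struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct afe4404_data *afe = iio_priv(indio_dev);

        (void)afe;      /* a real handler would now power the part down */
        return 0;
}
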
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 90ab8a2d2846..183c14329d6e 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
238 238
239 mutex_lock(&data->lock); 239 mutex_lock(&data->lock);
240 240
241 while (cnt || (cnt = max30100_fifo_count(data) > 0)) { 241 while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
242 ret = max30100_read_measurement(data); 242 ret = max30100_read_measurement(data);
243 if (ret) 243 if (ret)
244 break; 244 break;
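
Note: a one-character precedence bug with outsized effect. '>' binds tighter than '=', so the old condition stored the comparison result (0 or 1) in cnt rather than the FIFO depth, and the handler could drain at most one sample per pass. In miniature (helpers invented for illustration):

int fifo_count(void);
void consume_one_sample(void);

void demo(void)
{
        int cnt;

        cnt = fifo_count() > 0;                 /* bug: parses as cnt = (count > 0) */
        while ((cnt = fifo_count()) > 0)        /* fix: cnt holds the real depth */
                consume_one_sample();
}
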
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 9c47bc98f3ac..2a22ad920333 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -71,7 +71,8 @@
71 * a) select an implementation using busy loop polling on those systems 71 * a) select an implementation using busy loop polling on those systems
72 * b) use the checksum to do some probabilistic decoding 72 * b) use the checksum to do some probabilistic decoding
73 */ 73 */
74#define DHT11_START_TRANSMISSION 18 /* ms */ 74#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
75#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
75#define DHT11_MIN_TIMERES 34000 /* ns */ 76#define DHT11_MIN_TIMERES 34000 /* ns */
76#define DHT11_THRESHOLD 49000 /* ns */ 77#define DHT11_THRESHOLD 49000 /* ns */
77#define DHT11_AMBIG_LOW 23000 /* ns */ 78#define DHT11_AMBIG_LOW 23000 /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
228 ret = gpio_direction_output(dht11->gpio, 0); 229 ret = gpio_direction_output(dht11->gpio, 0);
229 if (ret) 230 if (ret)
230 goto err; 231 goto err;
231 msleep(DHT11_START_TRANSMISSION); 232 usleep_range(DHT11_START_TRANSMISSION_MIN,
233 DHT11_START_TRANSMISSION_MAX);
232 ret = gpio_direction_input(dht11->gpio); 234 ret = gpio_direction_input(dht11->gpio);
233 if (ret) 235 if (ret)
234 goto err; 236 goto err;
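
Note: per Documentation/timers/timers-howto.txt, msleep() of small values may oversleep by whole jiffies on coarse-HZ kernels, and the sensor's start pulse has an upper bound it is reported to tolerate; usleep_range() gives an hrtimer-backed window with both ends explicit:

/* hold the start pulse low for 18-20 ms, and no longer */
usleep_range(DHT11_START_TRANSMISSION_MIN, DHT11_START_TRANSMISSION_MAX);
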
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e7dcfac877ca..3e70a9c5d79d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2811 if (!src_addr || !src_addr->sa_family) { 2811 if (!src_addr || !src_addr->sa_family) {
2812 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2812 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
2813 src_addr->sa_family = dst_addr->sa_family; 2813 src_addr->sa_family = dst_addr->sa_family;
2814 if (dst_addr->sa_family == AF_INET6) { 2814 if (IS_ENABLED(CONFIG_IPV6) &&
2815 dst_addr->sa_family == AF_INET6) {
2815 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 2816 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
2816 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; 2817 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
2817 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 2818 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 1e62a5f0cb28..4609b921f899 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
134 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); 134 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
135 135
136 if (access & IB_ACCESS_ON_DEMAND) { 136 if (access & IB_ACCESS_ON_DEMAND) {
137 put_pid(umem->pid);
137 ret = ib_umem_odp_get(context, umem); 138 ret = ib_umem_odp_get(context, umem);
138 if (ret) { 139 if (ret) {
139 kfree(umem); 140 kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
149 150
150 page_list = (struct page **) __get_free_page(GFP_KERNEL); 151 page_list = (struct page **) __get_free_page(GFP_KERNEL);
151 if (!page_list) { 152 if (!page_list) {
153 put_pid(umem->pid);
152 kfree(umem); 154 kfree(umem);
153 return ERR_PTR(-ENOMEM); 155 return ERR_PTR(-ENOMEM);
154 } 156 }
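
Note: both hunks plug the same reference leak. ib_umem_get() takes a PID reference shortly after allocating the umem, so every early-exit path after that point must drop it before freeing. The symmetry being restored (acquire shown as it appears earlier in the function):

umem->pid = get_task_pid(current, PIDTYPE_PID);         /* acquire */

/* ...on any subsequent error path that frees the umem: */
put_pid(umem->pid);                                     /* release */
kfree(umem);
return ERR_PTR(-ENOMEM);
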
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9d5fe1853da4..6262dc035f3c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
1135 1135
1136 memset(props, 0, sizeof(struct ib_port_attr)); 1136 memset(props, 0, sizeof(struct ib_port_attr));
1137 props->max_mtu = IB_MTU_4096; 1137 props->max_mtu = IB_MTU_4096;
1138 if (netdev->mtu >= 4096) 1138 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
1139 props->active_mtu = IB_MTU_4096;
1140 else if (netdev->mtu >= 2048)
1141 props->active_mtu = IB_MTU_2048;
1142 else if (netdev->mtu >= 1024)
1143 props->active_mtu = IB_MTU_1024;
1144 else if (netdev->mtu >= 512)
1145 props->active_mtu = IB_MTU_512;
1146 else
1147 props->active_mtu = IB_MTU_256;
1148 1139
1149 if (!netif_carrier_ok(netdev)) 1140 if (!netif_carrier_ok(netdev))
1150 props->state = IB_PORT_DOWN; 1141 props->state = IB_PORT_DOWN;
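
Note: the open-coded ladder moves into a shared helper. A sketch of what ib_mtu_int_to_enum() does, matching the removed lines (the helper itself lives in the core verbs headers):

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        else if (mtu >= 2048)
                return IB_MTU_2048;
        else if (mtu >= 1024)
                return IB_MTU_1024;
        else if (mtu >= 512)
                return IB_MTU_512;
        else
                return IB_MTU_256;
}
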
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f1510cc76d2d..9398143d7c5e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1804 skb_trim(skb, dlen); 1804 skb_trim(skb, dlen);
1805 mutex_lock(&ep->com.mutex); 1805 mutex_lock(&ep->com.mutex);
1806 1806
1807 /* update RX credits */
1808 update_rx_credits(ep, dlen);
1809
1810 switch (ep->com.state) { 1807 switch (ep->com.state) {
1811 case MPA_REQ_SENT: 1808 case MPA_REQ_SENT:
1809 update_rx_credits(ep, dlen);
1812 ep->rcv_seq += dlen; 1810 ep->rcv_seq += dlen;
1813 disconnect = process_mpa_reply(ep, skb); 1811 disconnect = process_mpa_reply(ep, skb);
1814 break; 1812 break;
1815 case MPA_REQ_WAIT: 1813 case MPA_REQ_WAIT:
1814 update_rx_credits(ep, dlen);
1816 ep->rcv_seq += dlen; 1815 ep->rcv_seq += dlen;
1817 disconnect = process_mpa_request(ep, skb); 1816 disconnect = process_mpa_request(ep, skb);
1818 break; 1817 break;
1819 case FPDU_MODE: { 1818 case FPDU_MODE: {
1820 struct c4iw_qp_attributes attrs; 1819 struct c4iw_qp_attributes attrs;
1820
1821 update_rx_credits(ep, dlen);
1821 BUG_ON(!ep->com.qp); 1822 BUG_ON(!ep->com.qp);
1822 if (status) 1823 if (status)
1823 pr_err("%s Unexpected streaming data." \ 1824 pr_err("%s Unexpected streaming data." \
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 19c6477af19f..bec82a600d77 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -505,6 +505,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
505 } 505 }
506 506
507 /* 507 /*
508 * Special cqe for drain WR completions...
509 */
510 if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
511 *cookie = CQE_DRAIN_COOKIE(hw_cqe);
512 *cqe = *hw_cqe;
513 goto skip_cqe;
514 }
515
516 /*
508 * Gotta tweak READ completions: 517 * Gotta tweak READ completions:
509 * 1) the cqe doesn't contain the sq_wptr from the wr. 518 * 1) the cqe doesn't contain the sq_wptr from the wr.
510 * 2) opcode not reflected from the wr. 519 * 2) opcode not reflected from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
753 c4iw_invalidate_mr(qhp->rhp, 762 c4iw_invalidate_mr(qhp->rhp,
754 CQE_WRID_FR_STAG(&cqe)); 763 CQE_WRID_FR_STAG(&cqe));
755 break; 764 break;
765 case C4IW_DRAIN_OPCODE:
766 wc->opcode = IB_WC_SEND;
767 break;
756 default: 768 default:
757 printk(KERN_ERR MOD "Unexpected opcode %d " 769 printk(KERN_ERR MOD "Unexpected opcode %d "
758 "in the CQE received for QPID=0x%0x\n", 770 "in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
817 } 829 }
818 } 830 }
819out: 831out:
820 if (wq) { 832 if (wq)
821 if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
822 if (t4_sq_empty(wq))
823 complete(&qhp->sq_drained);
824 if (t4_rq_empty(wq))
825 complete(&qhp->rq_drained);
826 }
827 spin_unlock(&qhp->lock); 833 spin_unlock(&qhp->lock);
828 }
829 return ret; 834 return ret;
830} 835}
831 836
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 516b0ae6dc3f..40c0e7b9fc6e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
846 } 846 }
847 } 847 }
848 848
849 rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
850 if (!rdev->free_workq) {
851 err = -ENOMEM;
852 goto err_free_status_page;
853 }
854
849 rdev->status_page->db_off = 0; 855 rdev->status_page->db_off = 0;
850 856
851 return 0; 857 return 0;
858err_free_status_page:
859 free_page((unsigned long)rdev->status_page);
852destroy_ocqp_pool: 860destroy_ocqp_pool:
853 c4iw_ocqp_pool_destroy(rdev); 861 c4iw_ocqp_pool_destroy(rdev);
854destroy_rqtpool: 862destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
862 870
863static void c4iw_rdev_close(struct c4iw_rdev *rdev) 871static void c4iw_rdev_close(struct c4iw_rdev *rdev)
864{ 872{
873 destroy_workqueue(rdev->free_workq);
865 kfree(rdev->wr_log); 874 kfree(rdev->wr_log);
866 free_page((unsigned long)rdev->status_page); 875 free_page((unsigned long)rdev->status_page);
867 c4iw_pblpool_destroy(rdev); 876 c4iw_pblpool_destroy(rdev);
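
The device.c hunks above allocate a dedicated single-threaded workqueue ("iw_cxgb4_free") while opening the rdev and destroy it in c4iw_rdev_close(), extending the existing goto-based unwind chain with a new err_free_status_page label. A minimal userspace sketch of that unwind idiom, where each later allocation failure jumps to a label that releases everything acquired before it, in reverse order (alloc_step and the labels are illustrative stand-ins, not the driver's APIs):

    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_step(const char *name, int fail)
    {
            if (fail) {
                    printf("%s: allocation failed\n", name);
                    return NULL;
            }
            return malloc(1);
    }

    static int rdev_open_model(int fail_workq)
    {
            void *status_page, *workq;
            int err;

            status_page = alloc_step("status_page", 0);
            if (!status_page)
                    return -12;                     /* -ENOMEM */

            /* new step: its failure must undo the earlier allocation */
            workq = alloc_step("free_workq", fail_workq);
            if (!workq) {
                    err = -12;
                    goto err_free_status_page;
            }
            free(workq);
            free(status_page);
            return 0;

    err_free_status_page:
            free(status_page);          /* mirrors free_page(...status_page) */
            return err;
    }

    int main(void)
    {
            printf("ok path: %d\n", rdev_open_model(0));
            printf("failure path: %d\n", rdev_open_model(1));
            return 0;
    }
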
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4788e1a46fde..8cd4d054a87e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -45,6 +45,7 @@
45#include <linux/kref.h> 45#include <linux/kref.h>
46#include <linux/timer.h> 46#include <linux/timer.h>
47#include <linux/io.h> 47#include <linux/io.h>
48#include <linux/workqueue.h>
48 49
49#include <asm/byteorder.h> 50#include <asm/byteorder.h>
50 51
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
107 struct list_head qpids; 108 struct list_head qpids;
108 struct list_head cqids; 109 struct list_head cqids;
109 struct mutex lock; 110 struct mutex lock;
111 struct kref kref;
110}; 112};
111 113
112enum c4iw_rdev_flags { 114enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
183 atomic_t wr_log_idx; 185 atomic_t wr_log_idx;
184 struct wr_log_entry *wr_log; 186 struct wr_log_entry *wr_log;
185 int wr_log_size; 187 int wr_log_size;
188 struct workqueue_struct *free_workq;
186}; 189};
187 190
188static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) 191static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
480 wait_queue_head_t wait; 483 wait_queue_head_t wait;
481 struct timer_list timer; 484 struct timer_list timer;
482 int sq_sig_all; 485 int sq_sig_all;
483 struct completion rq_drained; 486 struct work_struct free_work;
484 struct completion sq_drained; 487 struct c4iw_ucontext *ucontext;
485}; 488};
486 489
487static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) 490static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
495 u32 key; 498 u32 key;
496 spinlock_t mmap_lock; 499 spinlock_t mmap_lock;
497 struct list_head mmaps; 500 struct list_head mmaps;
501 struct kref kref;
498}; 502};
499 503
500static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) 504static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
502 return container_of(c, struct c4iw_ucontext, ibucontext); 506 return container_of(c, struct c4iw_ucontext, ibucontext);
503} 507}
504 508
509void _c4iw_free_ucontext(struct kref *kref);
510
511static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
512{
513 kref_put(&ucontext->kref, _c4iw_free_ucontext);
514}
515
516static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
517{
518 kref_get(&ucontext->kref);
519}
520
505struct c4iw_mm_entry { 521struct c4iw_mm_entry {
506 struct list_head entry; 522 struct list_head entry;
507 u64 addr; 523 u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
615 return IB_QPS_ERR; 631 return IB_QPS_ERR;
616} 632}
617 633
634#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
635
618static inline u32 c4iw_ib_to_tpt_access(int a) 636static inline u32 c4iw_ib_to_tpt_access(int a)
619{ 637{
620 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | 638 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
997extern int db_fc_threshold; 1015extern int db_fc_threshold;
998extern int db_coalescing_threshold; 1016extern int db_coalescing_threshold;
999extern int use_dsgl; 1017extern int use_dsgl;
1000void c4iw_drain_rq(struct ib_qp *qp);
1001void c4iw_drain_sq(struct ib_qp *qp);
1002void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); 1018void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
1003 1019
1004#endif 1020#endif
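
iw_cxgb4.h now reference-counts struct c4iw_ucontext with a kref: c4iw_get_ucontext()/c4iw_put_ucontext() wrap kref_get()/kref_put(), and _c4iw_free_ucontext() runs only when the last holder lets go, so a QP that cached the ucontext pointer can safely outlive the verbs-layer dealloc call. A rough userspace model of the same lifetime rule, using a C11 atomic counter in place of struct kref (all names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ucontext_model {
            atomic_int kref;                /* stands in for struct kref */
    };

    static void free_ucontext(struct ucontext_model *uc)
    {
            printf("last reference dropped, freeing %p\n", (void *)uc);
            free(uc);
    }

    static void get_uc(struct ucontext_model *uc)
    {
            atomic_fetch_add(&uc->kref, 1);         /* kref_get() analogue */
    }

    static void put_uc(struct ucontext_model *uc)
    {
            /* kref_put() analogue: release only on the 1 -> 0 transition */
            if (atomic_fetch_sub(&uc->kref, 1) == 1)
                    free_ucontext(uc);
    }

    int main(void)
    {
            struct ucontext_model *uc = malloc(sizeof(*uc));

            atomic_init(&uc->kref, 1);      /* kref_init(): creator holds one ref */
            get_uc(uc);                     /* e.g. a QP takes a reference */
            put_uc(uc);                     /* dealloc_ucontext drops its ref... */
            put_uc(uc);                     /* ...QP teardown drops the last one */
            return 0;
    }
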
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 49b51b7e0fd7..3345e1c312f7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
93 return -ENOSYS; 93 return -ENOSYS;
94} 94}
95 95
96static int c4iw_dealloc_ucontext(struct ib_ucontext *context) 96void _c4iw_free_ucontext(struct kref *kref)
97{ 97{
98 struct c4iw_dev *rhp = to_c4iw_dev(context->device); 98 struct c4iw_ucontext *ucontext;
99 struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); 99 struct c4iw_dev *rhp;
100 struct c4iw_mm_entry *mm, *tmp; 100 struct c4iw_mm_entry *mm, *tmp;
101 101
102 PDBG("%s context %p\n", __func__, context); 102 ucontext = container_of(kref, struct c4iw_ucontext, kref);
103 rhp = to_c4iw_dev(ucontext->ibucontext.device);
104
105 PDBG("%s ucontext %p\n", __func__, ucontext);
103 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) 106 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
104 kfree(mm); 107 kfree(mm);
105 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); 108 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
106 kfree(ucontext); 109 kfree(ucontext);
110}
111
112static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
113{
114 struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
115
116 PDBG("%s context %p\n", __func__, context);
117 c4iw_put_ucontext(ucontext);
107 return 0; 118 return 0;
108} 119}
109 120
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
127 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); 138 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
128 INIT_LIST_HEAD(&context->mmaps); 139 INIT_LIST_HEAD(&context->mmaps);
129 spin_lock_init(&context->mmap_lock); 140 spin_lock_init(&context->mmap_lock);
141 kref_init(&context->kref);
130 142
131 if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { 143 if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
132 if (!warned++) 144 if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
361 373
362 memset(props, 0, sizeof(struct ib_port_attr)); 374 memset(props, 0, sizeof(struct ib_port_attr));
363 props->max_mtu = IB_MTU_4096; 375 props->max_mtu = IB_MTU_4096;
364 if (netdev->mtu >= 4096) 376 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
365 props->active_mtu = IB_MTU_4096;
366 else if (netdev->mtu >= 2048)
367 props->active_mtu = IB_MTU_2048;
368 else if (netdev->mtu >= 1024)
369 props->active_mtu = IB_MTU_1024;
370 else if (netdev->mtu >= 512)
371 props->active_mtu = IB_MTU_512;
372 else
373 props->active_mtu = IB_MTU_256;
374 377
375 if (!netif_carrier_ok(netdev)) 378 if (!netif_carrier_ok(netdev))
376 props->state = IB_PORT_DOWN; 379 props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
607 dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; 610 dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
608 dev->ibdev.get_port_immutable = c4iw_port_immutable; 611 dev->ibdev.get_port_immutable = c4iw_port_immutable;
609 dev->ibdev.get_dev_fw_str = get_dev_fw_str; 612 dev->ibdev.get_dev_fw_str = get_dev_fw_str;
610 dev->ibdev.drain_sq = c4iw_drain_sq;
611 dev->ibdev.drain_rq = c4iw_drain_rq;
612 613
613 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); 614 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
614 if (!dev->ibdev.iwcm) 615 if (!dev->ibdev.iwcm)
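
provider.c (and the matching i40iw and nes hunks further down) replaces an open-coded threshold ladder with ib_mtu_int_to_enum(). Assuming the helper implements the same mapping as the ladder it replaces — the largest IB MTU enum not exceeding the netdev MTU, with IB_MTU_256 as the floor — a standalone check of that mapping looks like:

    #include <stdio.h>

    enum ib_mtu {
            IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096
    };

    /* expected behaviour of ib_mtu_int_to_enum(), per the removed ladder */
    static enum ib_mtu mtu_int_to_enum(int mtu)
    {
            if (mtu >= 4096) return IB_MTU_4096;
            if (mtu >= 2048) return IB_MTU_2048;
            if (mtu >= 1024) return IB_MTU_1024;
            if (mtu >= 512)  return IB_MTU_512;
            return IB_MTU_256;
    }

    int main(void)
    {
            int samples[] = { 256, 511, 512, 1500, 2048, 4096, 9000 };

            for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("mtu %4d -> enum %d\n", samples[i],
                           mtu_int_to_enum(samples[i]));
            return 0;
    }
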
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cda5542e13a2..04c1c382dedb 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
715 return 0; 715 return 0;
716} 716}
717 717
718static void _free_qp(struct kref *kref) 718static void free_qp_work(struct work_struct *work)
719{
720 struct c4iw_ucontext *ucontext;
721 struct c4iw_qp *qhp;
722 struct c4iw_dev *rhp;
723
724 qhp = container_of(work, struct c4iw_qp, free_work);
725 ucontext = qhp->ucontext;
726 rhp = qhp->rhp;
727
728 PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
729 destroy_qp(&rhp->rdev, &qhp->wq,
730 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
731
732 if (ucontext)
733 c4iw_put_ucontext(ucontext);
734 kfree(qhp);
735}
736
737static void queue_qp_free(struct kref *kref)
719{ 738{
720 struct c4iw_qp *qhp; 739 struct c4iw_qp *qhp;
721 740
722 qhp = container_of(kref, struct c4iw_qp, kref); 741 qhp = container_of(kref, struct c4iw_qp, kref);
723 PDBG("%s qhp %p\n", __func__, qhp); 742 PDBG("%s qhp %p\n", __func__, qhp);
724 kfree(qhp); 743 queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
725} 744}
726 745
727void c4iw_qp_add_ref(struct ib_qp *qp) 746void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
733void c4iw_qp_rem_ref(struct ib_qp *qp) 752void c4iw_qp_rem_ref(struct ib_qp *qp)
734{ 753{
735 PDBG("%s ib_qp %p\n", __func__, qp); 754 PDBG("%s ib_qp %p\n", __func__, qp);
736 kref_put(&to_c4iw_qp(qp)->kref, _free_qp); 755 kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
737} 756}
738 757
739static void add_to_fc_list(struct list_head *head, struct list_head *entry) 758static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
776 return 0; 795 return 0;
777} 796}
778 797
798static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
799{
800 struct t4_cqe cqe = {};
801 struct c4iw_cq *schp;
802 unsigned long flag;
803 struct t4_cq *cq;
804
805 schp = to_c4iw_cq(qhp->ibqp.send_cq);
806 cq = &schp->cq;
807
808 cqe.u.drain_cookie = wr->wr_id;
809 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
810 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
811 CQE_TYPE_V(1) |
812 CQE_SWCQE_V(1) |
813 CQE_QPID_V(qhp->wq.sq.qid));
814
815 spin_lock_irqsave(&schp->lock, flag);
816 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
817 cq->sw_queue[cq->sw_pidx] = cqe;
818 t4_swcq_produce(cq);
819 spin_unlock_irqrestore(&schp->lock, flag);
820
821 spin_lock_irqsave(&schp->comp_handler_lock, flag);
822 (*schp->ibcq.comp_handler)(&schp->ibcq,
823 schp->ibcq.cq_context);
824 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
825}
826
827static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
828{
829 struct t4_cqe cqe = {};
830 struct c4iw_cq *rchp;
831 unsigned long flag;
832 struct t4_cq *cq;
833
834 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
835 cq = &rchp->cq;
836
837 cqe.u.drain_cookie = wr->wr_id;
838 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
839 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
840 CQE_TYPE_V(0) |
841 CQE_SWCQE_V(1) |
842 CQE_QPID_V(qhp->wq.sq.qid));
843
844 spin_lock_irqsave(&rchp->lock, flag);
845 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
846 cq->sw_queue[cq->sw_pidx] = cqe;
847 t4_swcq_produce(cq);
848 spin_unlock_irqrestore(&rchp->lock, flag);
849
850 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
851 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
852 rchp->ibcq.cq_context);
853 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
854}
855
779int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 856int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
780 struct ib_send_wr **bad_wr) 857 struct ib_send_wr **bad_wr)
781{ 858{
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
794 spin_lock_irqsave(&qhp->lock, flag); 871 spin_lock_irqsave(&qhp->lock, flag);
795 if (t4_wq_in_error(&qhp->wq)) { 872 if (t4_wq_in_error(&qhp->wq)) {
796 spin_unlock_irqrestore(&qhp->lock, flag); 873 spin_unlock_irqrestore(&qhp->lock, flag);
797 *bad_wr = wr; 874 complete_sq_drain_wr(qhp, wr);
798 return -EINVAL; 875 return err;
799 } 876 }
800 num_wrs = t4_sq_avail(&qhp->wq); 877 num_wrs = t4_sq_avail(&qhp->wq);
801 if (num_wrs == 0) { 878 if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
937 spin_lock_irqsave(&qhp->lock, flag); 1014 spin_lock_irqsave(&qhp->lock, flag);
938 if (t4_wq_in_error(&qhp->wq)) { 1015 if (t4_wq_in_error(&qhp->wq)) {
939 spin_unlock_irqrestore(&qhp->lock, flag); 1016 spin_unlock_irqrestore(&qhp->lock, flag);
940 *bad_wr = wr; 1017 complete_rq_drain_wr(qhp, wr);
941 return -EINVAL; 1018 return err;
942 } 1019 }
943 num_wrs = t4_rq_avail(&qhp->wq); 1020 num_wrs = t4_rq_avail(&qhp->wq);
944 if (num_wrs == 0) { 1021 if (num_wrs == 0) {
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1550 } 1627 }
1551 break; 1628 break;
1552 case C4IW_QP_STATE_CLOSING: 1629 case C4IW_QP_STATE_CLOSING:
1553 if (!internal) { 1630
1631 /*
1632 * Allow kernel users to move to ERROR for qp draining.
1633 */
1634 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1635 C4IW_QP_STATE_ERROR)) {
1554 ret = -EINVAL; 1636 ret = -EINVAL;
1555 goto out; 1637 goto out;
1556 } 1638 }
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
1643 struct c4iw_dev *rhp; 1725 struct c4iw_dev *rhp;
1644 struct c4iw_qp *qhp; 1726 struct c4iw_qp *qhp;
1645 struct c4iw_qp_attributes attrs; 1727 struct c4iw_qp_attributes attrs;
1646 struct c4iw_ucontext *ucontext;
1647 1728
1648 qhp = to_c4iw_qp(ib_qp); 1729 qhp = to_c4iw_qp(ib_qp);
1649 rhp = qhp->rhp; 1730 rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
1663 spin_unlock_irq(&rhp->lock); 1744 spin_unlock_irq(&rhp->lock);
1664 free_ird(rhp, qhp->attr.max_ird); 1745 free_ird(rhp, qhp->attr.max_ird);
1665 1746
1666 ucontext = ib_qp->uobject ?
1667 to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1668 destroy_qp(&rhp->rdev, &qhp->wq,
1669 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1670
1671 c4iw_qp_rem_ref(ib_qp); 1747 c4iw_qp_rem_ref(ib_qp);
1672 1748
1673 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); 1749 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1763 qhp->attr.max_ird = 0; 1839 qhp->attr.max_ird = 0;
1764 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; 1840 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1765 spin_lock_init(&qhp->lock); 1841 spin_lock_init(&qhp->lock);
1766 init_completion(&qhp->sq_drained);
1767 init_completion(&qhp->rq_drained);
1768 mutex_init(&qhp->mutex); 1842 mutex_init(&qhp->mutex);
1769 init_waitqueue_head(&qhp->wait); 1843 init_waitqueue_head(&qhp->wait);
1770 kref_init(&qhp->kref); 1844 kref_init(&qhp->kref);
1845 INIT_WORK(&qhp->free_work, free_qp_work);
1771 1846
1772 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); 1847 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1773 if (ret) 1848 if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1854 ma_sync_key_mm->len = PAGE_SIZE; 1929 ma_sync_key_mm->len = PAGE_SIZE;
1855 insert_mmap(ucontext, ma_sync_key_mm); 1930 insert_mmap(ucontext, ma_sync_key_mm);
1856 } 1931 }
1932
1933 c4iw_get_ucontext(ucontext);
1934 qhp->ucontext = ucontext;
1857 } 1935 }
1858 qhp->ibqp.qp_num = qhp->wq.sq.qid; 1936 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1859 init_timer(&(qhp->timer)); 1937 init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1958 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; 2036 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
1959 return 0; 2037 return 0;
1960} 2038}
1961
1962static void move_qp_to_err(struct c4iw_qp *qp)
1963{
1964 struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
1965
1966 (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1967}
1968
1969void c4iw_drain_sq(struct ib_qp *ibqp)
1970{
1971 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1972 unsigned long flag;
1973 bool need_to_wait;
1974
1975 move_qp_to_err(qp);
1976 spin_lock_irqsave(&qp->lock, flag);
1977 need_to_wait = !t4_sq_empty(&qp->wq);
1978 spin_unlock_irqrestore(&qp->lock, flag);
1979
1980 if (need_to_wait)
1981 wait_for_completion(&qp->sq_drained);
1982}
1983
1984void c4iw_drain_rq(struct ib_qp *ibqp)
1985{
1986 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1987 unsigned long flag;
1988 bool need_to_wait;
1989
1990 move_qp_to_err(qp);
1991 spin_lock_irqsave(&qp->lock, flag);
1992 need_to_wait = !t4_rq_empty(&qp->wq);
1993 spin_unlock_irqrestore(&qp->lock, flag);
1994
1995 if (need_to_wait)
1996 wait_for_completion(&qp->rq_drained);
1997}
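
Taken together, the qp.c, cq.c and t4.h changes rework drain support: instead of the driver-private c4iw_drain_sq()/c4iw_drain_rq() (deleted above), which moved the QP to ERROR and waited on the sq_drained/rq_drained completions, posting to a QP whose work queue is in error now succeeds and synthesizes a software CQE carrying the WR's wr_id (complete_sq_drain_wr()/complete_rq_drain_wr()); poll_cq() later recognizes it via C4IW_DRAIN_OPCODE and returns the cookie through CQE_DRAIN_COOKIE(). That lets the core ib_drain_qp() machinery work unmodified, and the QP itself is now freed from free_qp_work() on the new workqueue, presumably so destroy_qp() never runs from the context that drops the final kref. A toy model of the post-then-poll flow (the ring layout and all names are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SW_CQ_DEPTH  8
    #define DRAIN_OPCODE 0xff       /* stand-in for C4IW_DRAIN_OPCODE */

    struct sw_cqe { uint64_t cookie; int opcode; };

    struct model_qp {
            bool in_error;
            struct sw_cqe cq[SW_CQ_DEPTH];
            int pidx, cidx;
    };

    /* post_send analogue: an errored QP turns the WR into a software CQE */
    static int post_send(struct model_qp *qp, uint64_t wr_id)
    {
            if (qp->in_error) {
                    qp->cq[qp->pidx++ % SW_CQ_DEPTH] = (struct sw_cqe){
                            .cookie = wr_id, .opcode = DRAIN_OPCODE };
                    return 0;       /* success: completion will be polled */
            }
            /* ...real hardware posting elided... */
            return 0;
    }

    /* poll_cq analogue: a drain CQE completes with the stored wr_id */
    static bool poll_cq(struct model_qp *qp, uint64_t *wr_id)
    {
            if (qp->cidx == qp->pidx)
                    return false;
            *wr_id = qp->cq[qp->cidx++ % SW_CQ_DEPTH].cookie;
            return true;
    }

    int main(void)
    {
            struct model_qp qp = { .in_error = true };
            uint64_t id;

            post_send(&qp, 0xabc);  /* would have been -EINVAL before */
            while (poll_cq(&qp, &id))
                    printf("flushed completion, wr_id=0x%llx\n",
                           (unsigned long long)id);
            return 0;
    }
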
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 862381aa83c8..640d22148a3e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -179,6 +179,7 @@ struct t4_cqe {
179 __be32 wrid_hi; 179 __be32 wrid_hi;
180 __be32 wrid_low; 180 __be32 wrid_low;
181 } gen; 181 } gen;
182 u64 drain_cookie;
182 } u; 183 } u;
183 __be64 reserved; 184 __be64 reserved;
184 __be64 bits_type_ts; 185 __be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
238/* generic accessor macros */ 239/* generic accessor macros */
239#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) 240#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
240#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) 241#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
242#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
241 243
242/* macros for flit 3 of the cqe */ 244/* macros for flit 3 of the cqe */
243#define CQE_GENBIT_S 63 245#define CQE_GENBIT_S 63
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 29e97df9e1a7..4c000d60d5c6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
100 memset(props, 0, sizeof(*props)); 100 memset(props, 0, sizeof(*props));
101 101
102 props->max_mtu = IB_MTU_4096; 102 props->max_mtu = IB_MTU_4096;
103 if (netdev->mtu >= 4096) 103 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
104 props->active_mtu = IB_MTU_4096;
105 else if (netdev->mtu >= 2048)
106 props->active_mtu = IB_MTU_2048;
107 else if (netdev->mtu >= 1024)
108 props->active_mtu = IB_MTU_1024;
109 else if (netdev->mtu >= 512)
110 props->active_mtu = IB_MTU_512;
111 else
112 props->active_mtu = IB_MTU_256;
113 104
114 props->lid = 1; 105 props->lid = 1;
115 if (netif_carrier_ok(iwdev->netdev)) 106 if (netif_carrier_ok(iwdev->netdev))
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index aff9fb14768b..5a31f3c6a421 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
478 memset(props, 0, sizeof(*props)); 478 memset(props, 0, sizeof(*props));
479 479
480 props->max_mtu = IB_MTU_4096; 480 props->max_mtu = IB_MTU_4096;
481 481 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
482 if (netdev->mtu >= 4096)
483 props->active_mtu = IB_MTU_4096;
484 else if (netdev->mtu >= 2048)
485 props->active_mtu = IB_MTU_2048;
486 else if (netdev->mtu >= 1024)
487 props->active_mtu = IB_MTU_1024;
488 else if (netdev->mtu >= 512)
489 props->active_mtu = IB_MTU_512;
490 else
491 props->active_mtu = IB_MTU_256;
492 482
493 props->lid = 1; 483 props->lid = 1;
494 props->lmc = 0; 484 props->lmc = 0;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09a8217..3ac8aa5ef37d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
576 return 0; 576 return 0;
577} 577}
578 578
579void qedr_unaffiliated_event(void *context, 579void qedr_unaffiliated_event(void *context, u8 event_code)
580 u8 event_code)
581{ 580{
582 pr_err("unaffiliated event not implemented yet\n"); 581 pr_err("unaffiliated event not implemented yet\n");
583} 582}
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
792 if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) 791 if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
793 goto sysfs_err; 792 goto sysfs_err;
794 793
794 if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
795 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
796
795 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); 797 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
796 return dev; 798 return dev;
797 799
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
824 ib_dealloc_device(&dev->ibdev); 826 ib_dealloc_device(&dev->ibdev);
825} 827}
826 828
827static int qedr_close(struct qedr_dev *dev) 829static void qedr_close(struct qedr_dev *dev)
828{ 830{
829 qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); 831 if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
830 832 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
831 return 0;
832} 833}
833 834
834static void qedr_shutdown(struct qedr_dev *dev) 835static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
837 qedr_remove(dev); 838 qedr_remove(dev);
838} 839}
839 840
841static void qedr_open(struct qedr_dev *dev)
842{
843 if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
844 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
845}
846
840static void qedr_mac_address_change(struct qedr_dev *dev) 847static void qedr_mac_address_change(struct qedr_dev *dev)
841{ 848{
842 union ib_gid *sgid = &dev->sgid_tbl[0]; 849 union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
863 870
864 ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); 871 ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
865 872
866 qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); 873 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
867 874
868 if (rc) 875 if (rc)
869 DP_ERR(dev, "Error updating mac filter\n"); 876 DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
877{ 884{
878 switch (event) { 885 switch (event) {
879 case QEDE_UP: 886 case QEDE_UP:
880 qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); 887 qedr_open(dev);
881 break; 888 break;
882 case QEDE_DOWN: 889 case QEDE_DOWN:
883 qedr_close(dev); 890 qedr_close(dev);
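
qedr now tracks link state in QEDR_ENET_STATE_BIT so PORT_ACTIVE/PORT_ERR events fire only on an actual transition: test_and_set_bit() in qedr_open() and test_and_clear_bit() in qedr_close() make duplicate QEDE_UP/QEDE_DOWN notifications silent. The same edge-triggering, modeled with C11 atomic fetch-or/fetch-and in place of the kernel bit ops:

    #include <stdatomic.h>
    #include <stdio.h>

    #define ENET_STATE_BIT 0

    static atomic_ulong enet_state;

    static void dispatch(const char *ev) { printf("dispatch %s\n", ev); }

    /* qedr_open() analogue: only the 0 -> 1 transition reports PORT_ACTIVE */
    static void port_up(void)
    {
            unsigned long old =
                    atomic_fetch_or(&enet_state, 1UL << ENET_STATE_BIT);
            if (!(old & (1UL << ENET_STATE_BIT)))
                    dispatch("IB_EVENT_PORT_ACTIVE");
    }

    /* qedr_close() analogue: only the 1 -> 0 transition reports PORT_ERR */
    static void port_down(void)
    {
            unsigned long old =
                    atomic_fetch_and(&enet_state, ~(1UL << ENET_STATE_BIT));
            if (old & (1UL << ENET_STATE_BIT))
                    dispatch("IB_EVENT_PORT_ERR");
    }

    int main(void)
    {
            port_up();      /* dispatches */
            port_up();      /* duplicate: silent */
            port_down();    /* dispatches */
            port_down();    /* duplicate: silent */
            return 0;
    }
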
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd7d4fb..bb32e4792ec9 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@ struct qedr_device_attr {
113 struct qed_rdma_events events; 113 struct qed_rdma_events events;
114}; 114};
115 115
116#define QEDR_ENET_STATE_BIT (0)
117
116struct qedr_dev { 118struct qedr_dev {
117 struct ib_device ibdev; 119 struct ib_device ibdev;
118 struct qed_dev *cdev; 120 struct qed_dev *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
153 struct qedr_cq *gsi_sqcq; 155 struct qedr_cq *gsi_sqcq;
154 struct qedr_cq *gsi_rqcq; 156 struct qedr_cq *gsi_rqcq;
155 struct qedr_qp *gsi_qp; 157 struct qedr_qp *gsi_qp;
158
159 unsigned long enet_state;
156}; 160};
157 161
158#define QEDR_MAX_SQ_PBL (0x8000) 162#define QEDR_MAX_SQ_PBL (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
188#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) 192#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
189 193
190#define QEDR_MAX_PORT (1) 194#define QEDR_MAX_PORT (1)
195#define QEDR_PORT (1)
191 196
192#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 197#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
193 198
@@ -251,9 +256,6 @@ struct qedr_cq {
251 256
252 u16 icid; 257 u16 icid;
253 258
254 /* Lock to protect completion handler */
255 spinlock_t comp_handler_lock;
256
 257	/* Lock to protect multiple CQs */ 259	/* Lock to protect multiple CQs */
258 spinlock_t cq_lock; 260 spinlock_t cq_lock;
259 u8 arm_flags; 261 u8 arm_flags;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 63890ebb72bd..a9a8d8745d2e 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
87 qedr_inc_sw_gsi_cons(&qp->sq); 87 qedr_inc_sw_gsi_cons(&qp->sq);
88 spin_unlock_irqrestore(&qp->q_lock, flags); 88 spin_unlock_irqrestore(&qp->q_lock, flags);
89 89
90 if (cq->ibcq.comp_handler) { 90 if (cq->ibcq.comp_handler)
91 spin_lock_irqsave(&cq->comp_handler_lock, flags);
92 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 91 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
93 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
94 }
95} 92}
96 93
97void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, 94void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
113 110
114 spin_unlock_irqrestore(&qp->q_lock, flags); 111 spin_unlock_irqrestore(&qp->q_lock, flags);
115 112
116 if (cq->ibcq.comp_handler) { 113 if (cq->ibcq.comp_handler)
117 spin_lock_irqsave(&cq->comp_handler_lock, flags);
118 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 114 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
119 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
120 }
121} 115}
122 116
123static void qedr_destroy_gsi_cq(struct qedr_dev *dev, 117static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
404 } 398 }
405 399
406 if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) 400 if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
407 packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
408 else
409 packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; 401 packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
402 else
403 packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
410 404
411 packet->roce_mode = roce_mode; 405 packet->roce_mode = roce_mode;
412 memcpy(packet->header.vaddr, ud_header_buffer, header_size); 406 memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 57c8de208077..c7d6c9a783bd 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471 struct ib_ucontext *context, struct ib_udata *udata) 471 struct ib_ucontext *context, struct ib_udata *udata)
472{ 472{
473 struct qedr_dev *dev = get_qedr_dev(ibdev); 473 struct qedr_dev *dev = get_qedr_dev(ibdev);
474 struct qedr_ucontext *uctx = NULL;
475 struct qedr_alloc_pd_uresp uresp;
476 struct qedr_pd *pd; 474 struct qedr_pd *pd;
477 u16 pd_id; 475 u16 pd_id;
478 int rc; 476 int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
489 if (!pd) 487 if (!pd)
490 return ERR_PTR(-ENOMEM); 488 return ERR_PTR(-ENOMEM);
491 489
492 dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); 490 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
491 if (rc)
492 goto err;
493 493
494 uresp.pd_id = pd_id;
495 pd->pd_id = pd_id; 494 pd->pd_id = pd_id;
496 495
497 if (udata && context) { 496 if (udata && context) {
497 struct qedr_alloc_pd_uresp uresp;
498
499 uresp.pd_id = pd_id;
500
498 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 501 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
499 if (rc) 502 if (rc) {
500 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); 503 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
501 uctx = get_qedr_ucontext(context); 504 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
502 uctx->pd = pd; 505 goto err;
503 pd->uctx = uctx; 506 }
507
508 pd->uctx = get_qedr_ucontext(context);
509 pd->uctx->pd = pd;
504 } 510 }
505 511
506 return &pd->ibpd; 512 return &pd->ibpd;
513
514err:
515 kfree(pd);
516 return ERR_PTR(rc);
507} 517}
508 518
509int qedr_dealloc_pd(struct ib_pd *ibpd) 519int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
1600 return ERR_PTR(-EFAULT); 1610 return ERR_PTR(-EFAULT);
1601} 1611}
1602 1612
1603enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) 1613static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1604{ 1614{
1605 switch (qp_state) { 1615 switch (qp_state) {
1606 case QED_ROCE_QP_STATE_RESET: 1616 case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1621 return IB_QPS_ERR; 1631 return IB_QPS_ERR;
1622} 1632}
1623 1633
1624enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) 1634static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1635 enum ib_qp_state qp_state)
1625{ 1636{
1626 switch (qp_state) { 1637 switch (qp_state) {
1627 case IB_QPS_RESET: 1638 case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
1657 int status = 0; 1668 int status = 0;
1658 1669
1659 if (new_state == qp->state) 1670 if (new_state == qp->state)
1660 return 1; 1671 return 0;
1661 1672
1662 switch (qp->state) { 1673 switch (qp->state) {
1663 case QED_ROCE_QP_STATE_RESET: 1674 case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
1733 /* ERR->XXX */ 1744 /* ERR->XXX */
1734 switch (new_state) { 1745 switch (new_state) {
1735 case QED_ROCE_QP_STATE_RESET: 1746 case QED_ROCE_QP_STATE_RESET:
1747 if ((qp->rq.prod != qp->rq.cons) ||
1748 (qp->sq.prod != qp->sq.cons)) {
1749 DP_NOTICE(dev,
1750 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1751 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1752 qp->sq.cons);
1753 status = -EINVAL;
1754 }
1736 break; 1755 break;
1737 default: 1756 default:
1738 status = -EINVAL; 1757 status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1865 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); 1884 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1866 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", 1885 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1867 qp_params.remote_mac_addr); 1886 qp_params.remote_mac_addr);
1868;
1869 1887
1870 qp_params.mtu = qp->mtu; 1888 qp_params.mtu = qp->mtu;
1871 qp_params.lb_indication = false; 1889 qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
2016 2034
2017 qp_attr->qp_state = qedr_get_ibqp_state(params.state); 2035 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2018 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); 2036 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2019 qp_attr->path_mtu = iboe_get_mtu(params.mtu); 2037 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2020 qp_attr->path_mig_state = IB_MIG_MIGRATED; 2038 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2021 qp_attr->rq_psn = params.rq_psn; 2039 qp_attr->rq_psn = params.rq_psn;
2022 qp_attr->sq_psn = params.sq_psn; 2040 qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
2028 qp_attr->cap.max_recv_wr = qp->rq.max_wr; 2046 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2029 qp_attr->cap.max_send_sge = qp->sq.max_sges; 2047 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2030 qp_attr->cap.max_recv_sge = qp->rq.max_sges; 2048 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2031 qp_attr->cap.max_inline_data = qp->max_inline_data; 2049 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2032 qp_init_attr->cap = qp_attr->cap; 2050 qp_init_attr->cap = qp_attr->cap;
2033 2051
2034 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0], 2052 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
2302 return rc; 2320 return rc;
2303} 2321}
2304 2322
2305struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len) 2323static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2324 int max_page_list_len)
2306{ 2325{
2307 struct qedr_pd *pd = get_qedr_pd(ibpd); 2326 struct qedr_pd *pd = get_qedr_pd(ibpd);
2308 struct qedr_dev *dev = get_qedr_dev(ibpd->device); 2327 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
2704 return 0; 2723 return 0;
2705} 2724}
2706 2725
2707enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) 2726static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2708{ 2727{
2709 switch (opcode) { 2728 switch (opcode) {
2710 case IB_WR_RDMA_WRITE: 2729 case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2729 } 2748 }
2730} 2749}
2731 2750
2732inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) 2751static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2733{ 2752{
2734 int wq_is_full, err_wr, pbl_is_full; 2753 int wq_is_full, err_wr, pbl_is_full;
2735 struct qedr_dev *dev = qp->dev; 2754 struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2766 return true; 2785 return true;
2767} 2786}
2768 2787
2769int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 2788static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2770 struct ib_send_wr **bad_wr) 2789 struct ib_send_wr **bad_wr)
2771{ 2790{
2772 struct qedr_dev *dev = get_qedr_dev(ibqp->device); 2791 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
3234 IB_WC_SUCCESS, 0); 3253 IB_WC_SUCCESS, 0);
3235 break; 3254 break;
3236 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: 3255 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3237 DP_ERR(dev, 3256 if (qp->state != QED_ROCE_QP_STATE_ERR)
3238 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", 3257 DP_ERR(dev,
3239 cq->icid, qp->icid); 3258 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3259 cq->icid, qp->icid);
3240 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, 3260 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3241 IB_WC_WR_FLUSH_ERR, 1); 3261 IB_WC_WR_FLUSH_ERR, 1);
3242 break; 3262 break;
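
In the verbs.c hunks, qedr_alloc_pd() now checks the return value of rdma_alloc_pd() and unwinds on any failure — including a failed ib_copy_to_udata() — with kfree() plus ERR_PTR(rc), instead of silently ignoring rc. A userspace model of the ERR_PTR/IS_ERR convention the error path relies on (the model helpers mirror the kernel's definitions in shape only; this is a sketch, not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    /* errors travel inside the pointer value itself */
    static void *ERR_PTR_model(long err)      { return (void *)err; }
    static long PTR_ERR_model(const void *p)  { return (long)p; }
    static int  IS_ERR_model(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *alloc_pd_model(int fail)
    {
            void *pd = malloc(16);

            if (!pd)
                    return ERR_PTR_model(-12);      /* -ENOMEM */
            if (fail) {                             /* rdma_alloc_pd() failed */
                    free(pd);                       /* undo before returning */
                    return ERR_PTR_model(-22);      /* -EINVAL */
            }
            return pd;
    }

    int main(void)
    {
            void *pd = alloc_pd_model(1);

            if (IS_ERR_model(pd))
                    printf("alloc_pd failed: %ld\n", PTR_ERR_model(pd));
            else
                    free(pd);
            return 0;
    }
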
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 231a1ce1f4be..bd8fbd3d2032 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
1029 if (ret) { 1029 if (ret) {
1030 dev_err(&pdev->dev, "failed to allocate interrupts\n"); 1030 dev_err(&pdev->dev, "failed to allocate interrupts\n");
1031 ret = -ENOMEM; 1031 ret = -ENOMEM;
1032 goto err_netdevice; 1032 goto err_free_cq_ring;
1033 } 1033 }
1034 1034
1035 /* Allocate UAR table. */ 1035 /* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
1092err_free_intrs: 1092err_free_intrs:
1093 pvrdma_free_irq(dev); 1093 pvrdma_free_irq(dev);
1094 pvrdma_disable_msi_all(dev); 1094 pvrdma_disable_msi_all(dev);
1095err_netdevice:
1096 unregister_netdevice_notifier(&dev->nb_netdev);
1097err_free_cq_ring: 1095err_free_cq_ring:
1098 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); 1096 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
1099err_free_async_ring: 1097err_free_async_ring:
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 54891370d18a..c2aa52638dcb 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
306 union pvrdma_cmd_resp rsp; 306 union pvrdma_cmd_resp rsp;
307 struct pvrdma_cmd_create_uc *cmd = &req.create_uc; 307 struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
308 struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; 308 struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
309 struct pvrdma_alloc_ucontext_resp uresp; 309 struct pvrdma_alloc_ucontext_resp uresp = {0};
310 int ret; 310 int ret;
311 void *ptr; 311 void *ptr;
312 312
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 342e78163613..4abdeb359fb4 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
555 } 555 }
556 556
557 spin_lock_bh(&dev_list_lock); 557 spin_lock_bh(&dev_list_lock);
558 list_add_tail(&rxe_dev_list, &rxe->list); 558 list_add_tail(&rxe->list, &rxe_dev_list);
559 spin_unlock_bh(&dev_list_lock); 559 spin_unlock_bh(&dev_list_lock);
560 return rxe; 560 return rxe;
561} 561}
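
The rxe_net.c fix swaps the arguments of list_add_tail(): the kernel helper takes the new entry first and the list head second, so list_add_tail(&rxe_dev_list, &rxe->list) spliced the global head after each device instead of queueing the device on the global list. A self-contained reproduction of the helper and the correct call order:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    /* same contract as the kernel helper: new entry first, head second */
    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
            new->prev = head->prev;
            new->next = head;
            head->prev->next = new;
            head->prev = new;
    }

    struct rxe_model { int id; struct list_head list; };

    int main(void)
    {
            struct list_head dev_list = LIST_HEAD_INIT(dev_list);
            struct rxe_model a = { .id = 1 }, b = { .id = 2 };

            /* correct order: &entry->list, then the global head */
            list_add_tail(&a.list, &dev_list);
            list_add_tail(&b.list, &dev_list);

            /* swapping the arguments would splice the head into each
             * device's own list node instead -- the bug fixed above */
            for (struct list_head *p = dev_list.next; p != &dev_list;
                 p = p->next) {
                    struct rxe_model *m = (struct rxe_model *)
                            ((char *)p - offsetof(struct rxe_model, list));
                    printf("dev %d\n", m->id);
            }
            return 0;
    }
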
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 486d576e55bc..44b2108253bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
813 del_timer_sync(&qp->rnr_nak_timer); 813 del_timer_sync(&qp->rnr_nak_timer);
814 814
815 rxe_cleanup_task(&qp->req.task); 815 rxe_cleanup_task(&qp->req.task);
816 if (qp_type(qp) == IB_QPT_RC) 816 rxe_cleanup_task(&qp->comp.task);
817 rxe_cleanup_task(&qp->comp.task);
818 817
819 /* flush out any receive wr's or pending requests */ 818 /* flush out any receive wr's or pending requests */
820 __rxe_do_task(&qp->req.task); 819 __rxe_do_task(&qp->req.task);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9104e6b8cac9..e71af717e71b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
651 SHOST_DIX_GUARD_CRC); 651 SHOST_DIX_GUARD_CRC);
652 } 652 }
653 653
654 /*
655 * Limit the sg_tablesize and max_sectors based on the device
656 * max fastreg page list length.
657 */
658 shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
659 ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
660
661 if (iscsi_host_add(shost, 654 if (iscsi_host_add(shost,
662 ib_conn->device->ib_device->dma_device)) { 655 ib_conn->device->ib_device->dma_device)) {
663 mutex_unlock(&iser_conn->state_mutex); 656 mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
679 max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; 672 max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
680 shost->max_sectors = min(iser_max_sectors, max_fr_sectors); 673 shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
681 674
675 iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
676 iser_conn, shost->sg_tablesize,
677 shost->max_sectors);
678
682 if (cmds_max > max_cmds) { 679 if (cmds_max > max_cmds) {
683 iser_info("cmds_max changed from %u to %u\n", 680 iser_info("cmds_max changed from %u to %u\n",
684 cmds_max, max_cmds); 681 cmds_max, max_cmds);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0be6a7c5ddb5..9d0b22ad58c1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -496,7 +496,6 @@ struct ib_conn {
496 * @rx_descs: rx buffers array (cyclic buffer) 496 * @rx_descs: rx buffers array (cyclic buffer)
497 * @num_rx_descs: number of rx descriptors 497 * @num_rx_descs: number of rx descriptors
498 * @scsi_sg_tablesize: scsi host sg_tablesize 498 * @scsi_sg_tablesize: scsi host sg_tablesize
499 * @scsi_max_sectors: scsi host max sectors
500 */ 499 */
501struct iser_conn { 500struct iser_conn {
502 struct ib_conn ib_conn; 501 struct ib_conn ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
519 struct iser_rx_desc *rx_descs; 518 struct iser_rx_desc *rx_descs;
520 u32 num_rx_descs; 519 u32 num_rx_descs;
521 unsigned short scsi_sg_tablesize; 520 unsigned short scsi_sg_tablesize;
522 unsigned int scsi_max_sectors;
523 bool snd_w_inv; 521 bool snd_w_inv;
524}; 522};
525 523
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8ae7a3beddb7..6a9d1cb548ee 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
707 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, 707 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
708 device->ib_device->attrs.max_fast_reg_page_list_len); 708 device->ib_device->attrs.max_fast_reg_page_list_len);
709 709
710 if (sg_tablesize > sup_sg_tablesize) { 710 iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
711 sg_tablesize = sup_sg_tablesize;
712 iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
713 } else {
714 iser_conn->scsi_max_sectors = max_sectors;
715 }
716
717 iser_conn->scsi_sg_tablesize = sg_tablesize;
718
719 iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
720 iser_conn, iser_conn->scsi_sg_tablesize,
721 iser_conn->scsi_max_sectors);
722} 711}
723 712
724/** 713/**
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8ddc07123193..79bf48477ddb 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
371 struct srp_fr_desc *d; 371 struct srp_fr_desc *d;
372 struct ib_mr *mr; 372 struct ib_mr *mr;
373 int i, ret = -EINVAL; 373 int i, ret = -EINVAL;
374 enum ib_mr_type mr_type;
374 375
375 if (pool_size <= 0) 376 if (pool_size <= 0)
376 goto err; 377 goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
384 spin_lock_init(&pool->lock); 385 spin_lock_init(&pool->lock);
385 INIT_LIST_HEAD(&pool->free_list); 386 INIT_LIST_HEAD(&pool->free_list);
386 387
388 if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
389 mr_type = IB_MR_TYPE_SG_GAPS;
390 else
391 mr_type = IB_MR_TYPE_MEM_REG;
392
387 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { 393 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
388 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 394 mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
389 max_page_list_len);
390 if (IS_ERR(mr)) { 395 if (IS_ERR(mr)) {
391 ret = PTR_ERR(mr); 396 ret = PTR_ERR(mr);
392 if (ret == -ENOMEM) 397 if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
3694 indirect_sg_entries = cmd_sg_entries; 3699 indirect_sg_entries = cmd_sg_entries;
3695 } 3700 }
3696 3701
3702 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
3703 pr_warn("Clamping indirect_sg_entries to %u\n",
3704 SG_MAX_SEGMENTS);
3705 indirect_sg_entries = SG_MAX_SEGMENTS;
3706 }
3707
3697 srp_remove_wq = create_workqueue("srp_remove"); 3708 srp_remove_wq = create_workqueue("srp_remove");
3698 if (!srp_remove_wq) { 3709 if (!srp_remove_wq) {
3699 ret = -ENOMEM; 3710 ret = -ENOMEM;
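
ib_srp gains two hardening tweaks above: fast-reg pools use IB_MR_TYPE_SG_GAPS when the device advertises IB_DEVICE_SG_GAPS_REG, and the user-supplied indirect_sg_entries module parameter is clamped to SG_MAX_SEGMENTS before any target uses it. A sketch of the clamp (the limit value here is a stand-in; the real SG_MAX_SEGMENTS comes from linux/scatterlist.h):

    #include <stdio.h>

    #define SG_MAX_SEGMENTS_MODEL 2048      /* stand-in for the kernel limit */

    /* srp_init_module() analogue: cap the module parameter at load time */
    static unsigned clamp_indirect_sg_entries(unsigned requested)
    {
            if (requested > SG_MAX_SEGMENTS_MODEL) {
                    printf("Clamping indirect_sg_entries to %u\n",
                           SG_MAX_SEGMENTS_MODEL);
                    return SG_MAX_SEGMENTS_MODEL;
            }
            return requested;
    }

    int main(void)
    {
            printf("-> %u\n", clamp_indirect_sg_entries(512));
            printf("-> %u\n", clamp_indirect_sg_entries(4096));
            return 0;
    }
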
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 11447ab1055c..bf5c36e229ba 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
901 data->enabled = true; 901 data->enabled = true;
902 if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) { 902 if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
903 retval = disable_irq_wake(irq); 903 retval = disable_irq_wake(irq);
904 if (!retval) 904 if (retval)
905 dev_warn(&rmi_dev->dev, 905 dev_warn(&rmi_dev->dev,
906 "Failed to disable irq for wake: %d\n", 906 "Failed to disable irq for wake: %d\n",
907 retval); 907 retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
936 disable_irq(irq); 936 disable_irq(irq);
937 if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) { 937 if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
938 retval = enable_irq_wake(irq); 938 retval = enable_irq_wake(irq);
939 if (!retval) 939 if (retval)
940 dev_warn(&rmi_dev->dev, 940 dev_warn(&rmi_dev->dev,
941 "Failed to enable irq for wake: %d\n", 941 "Failed to enable irq for wake: %d\n",
942 retval); 942 retval);
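
Both rmi_driver.c hunks fix an inverted check: enable_irq_wake()/disable_irq_wake() follow the usual kernel convention of returning 0 on success and a negative errno on failure, so warning when !retval complained on every success and hid every real failure. In miniature:

    #include <stdio.h>

    /* stand-in: 0 on success, negative errno on failure */
    static int enable_irq_wake_model(int should_fail)
    {
            return should_fail ? -22 /* -EINVAL */ : 0;
    }

    static void arm_wake(int should_fail)
    {
            int retval = enable_irq_wake_model(should_fail);

            if (retval)     /* correct: warn only on a non-zero return */
                    printf("Failed to enable irq for wake: %d\n", retval);
            else
                    printf("wake irq armed\n");
    }

    int main(void)
    {
            arm_wake(0);
            arm_wake(1);
            return 0;
    }
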
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 83cf11312fd9..c9d1c91e1887 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
682 } 682 }
683 platform_set_drvdata(wm->battery_dev, wm); 683 platform_set_drvdata(wm->battery_dev, wm);
684 wm->battery_dev->dev.parent = dev; 684 wm->battery_dev->dev.parent = dev;
685 wm->battery_dev->dev.platform_data = pdata->batt_pdata; 685 wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
686 ret = platform_device_add(wm->battery_dev); 686 ret = platform_device_add(wm->battery_dev);
687 if (ret < 0) 687 if (ret < 0)
688 goto batt_reg_err; 688 goto batt_reg_err;
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 1a1d99704fe6..296f1411fe84 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
11297 ((CAPI_MSG *) msg)->header.ncci = 0; 11297 ((CAPI_MSG *) msg)->header.ncci = 0;
11298 ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT; 11298 ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
11299 ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3; 11299 ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
11300 PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE); 11300 ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
11301 ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
11301 ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0; 11302 ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
11302 w = api_put(notify_plci->appl, (CAPI_MSG *) msg); 11303 w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
11303 if (w != _QUEUE_FULL) 11304 if (w != _QUEUE_FULL)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7c6c57216bf2..8a9f742d8ed7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1534,18 +1534,18 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
1534 return PTR_ERR(key); 1534 return PTR_ERR(key);
1535 } 1535 }
1536 1536
1537 rcu_read_lock(); 1537 down_read(&key->sem);
1538 1538
1539 ukp = user_key_payload(key); 1539 ukp = user_key_payload(key);
1540 if (!ukp) { 1540 if (!ukp) {
1541 rcu_read_unlock(); 1541 up_read(&key->sem);
1542 key_put(key); 1542 key_put(key);
1543 kzfree(new_key_string); 1543 kzfree(new_key_string);
1544 return -EKEYREVOKED; 1544 return -EKEYREVOKED;
1545 } 1545 }
1546 1546
1547 if (cc->key_size != ukp->datalen) { 1547 if (cc->key_size != ukp->datalen) {
1548 rcu_read_unlock(); 1548 up_read(&key->sem);
1549 key_put(key); 1549 key_put(key);
1550 kzfree(new_key_string); 1550 kzfree(new_key_string);
1551 return -EINVAL; 1551 return -EINVAL;
@@ -1553,7 +1553,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
1553 1553
1554 memcpy(cc->key, ukp->data, cc->key_size); 1554 memcpy(cc->key, ukp->data, cc->key_size);
1555 1555
1556 rcu_read_unlock(); 1556 up_read(&key->sem);
1557 key_put(key); 1557 key_put(key);
1558 1558
1559 /* clear the flag since following operations may invalidate previously valid key */ 1559 /* clear the flag since following operations may invalidate previously valid key */
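
The dm-crypt hunks switch the keyring payload access from an RCU read-side section to the key's own semaphore: down_read(&key->sem) keeps the payload stable across both the datalen validation and the memcpy(), with up_read() on every exit path. A pthread model of that read-side locking, where pthread_rwlock_t stands in for the kernel rw-semaphore (build with -pthread; all names are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct key_model {
            pthread_rwlock_t sem;           /* stands in for key->sem */
            unsigned char payload[32];
            size_t datalen;
    };

    static int copy_key(struct key_model *key, unsigned char *dst,
                        size_t dst_len)
    {
            int ret = 0;

            pthread_rwlock_rdlock(&key->sem);       /* down_read(&key->sem) */
            if (key->datalen != dst_len)
                    ret = -22;                      /* -EINVAL */
            else
                    memcpy(dst, key->payload, dst_len);
            pthread_rwlock_unlock(&key->sem);       /* up_read(&key->sem) */
            return ret;
    }

    int main(void)
    {
            struct key_model key = { .datalen = 32 };
            unsigned char buf[32];

            pthread_rwlock_init(&key.sem, NULL);
            printf("copy: %d\n", copy_key(&key, buf, sizeof(buf)));
            return 0;
    }
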
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6400cffb986d..3570bcb7a4a4 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -427,7 +427,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
427 unsigned long flags; 427 unsigned long flags;
428 struct priority_group *pg; 428 struct priority_group *pg;
429 struct pgpath *pgpath; 429 struct pgpath *pgpath;
430 bool bypassed = true; 430 unsigned bypassed = 1;
431 431
432 if (!atomic_read(&m->nr_valid_paths)) { 432 if (!atomic_read(&m->nr_valid_paths)) {
433 clear_bit(MPATHF_QUEUE_IO, &m->flags); 433 clear_bit(MPATHF_QUEUE_IO, &m->flags);
@@ -466,7 +466,7 @@ check_current_pg:
466 */ 466 */
467 do { 467 do {
468 list_for_each_entry(pg, &m->priority_groups, list) { 468 list_for_each_entry(pg, &m->priority_groups, list) {
469 if (pg->bypassed == bypassed) 469 if (pg->bypassed == !!bypassed)
470 continue; 470 continue;
471 pgpath = choose_path_in_pg(m, pg, nr_bytes); 471 pgpath = choose_path_in_pg(m, pg, nr_bytes);
472 if (!IS_ERR_OR_NULL(pgpath)) { 472 if (!IS_ERR_OR_NULL(pgpath)) {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9d7275fb541a..6e702fc69a83 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -779,6 +779,10 @@ static void dm_old_request_fn(struct request_queue *q)
779 int srcu_idx; 779 int srcu_idx;
780 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 780 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
781 781
782 if (unlikely(!map)) {
783 dm_put_live_table(md, srcu_idx);
784 return;
785 }
782 ti = dm_table_find_target(map, pos); 786 ti = dm_table_find_target(map, pos);
783 dm_put_live_table(md, srcu_idx); 787 dm_put_live_table(md, srcu_idx);
784 } 788 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 82821ee0d57f..01175dac0db6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5291,6 +5291,11 @@ int md_run(struct mddev *mddev)
5291 if (start_readonly && mddev->ro == 0) 5291 if (start_readonly && mddev->ro == 0)
5292 mddev->ro = 2; /* read-only, but switch on first write */ 5292 mddev->ro = 2; /* read-only, but switch on first write */
5293 5293
5294 /*
5295 * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
5296 * up mddev->thread. It is important to initialize critical
5297 * resources for mddev->thread BEFORE calling pers->run().
5298 */
5294 err = pers->run(mddev); 5299 err = pers->run(mddev);
5295 if (err) 5300 if (err)
5296 pr_warn("md: pers->run() failed ...\n"); 5301 pr_warn("md: pers->run() failed ...\n");
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 0e8ed2c327b0..302dea3296ba 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -162,6 +162,8 @@ struct r5l_log {
162 162
163 /* to submit async io_units, to fulfill ordering of flush */ 163 /* to submit async io_units, to fulfill ordering of flush */
164 struct work_struct deferred_io_work; 164 struct work_struct deferred_io_work;
 165	/* to disable writeback while in degraded mode */
166 struct work_struct disable_writeback_work;
165}; 167};
166 168
167/* 169/*
@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work)
611 r5l_do_submit_io(log, io); 613 r5l_do_submit_io(log, io);
612} 614}
613 615
616static void r5c_disable_writeback_async(struct work_struct *work)
617{
618 struct r5l_log *log = container_of(work, struct r5l_log,
619 disable_writeback_work);
620 struct mddev *mddev = log->rdev->mddev;
621
622 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
623 return;
624 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
625 mdname(mddev));
626 mddev_suspend(mddev);
627 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
628 mddev_resume(mddev);
629}
630
614static void r5l_submit_current_io(struct r5l_log *log) 631static void r5l_submit_current_io(struct r5l_log *log)
615{ 632{
616 struct r5l_io_unit *io = log->current_io; 633 struct r5l_io_unit *io = log->current_io;
@@ -1393,8 +1410,6 @@ static void r5l_do_reclaim(struct r5l_log *log)
1393 next_checkpoint = r5c_calculate_new_cp(conf); 1410 next_checkpoint = r5c_calculate_new_cp(conf);
1394 spin_unlock_irq(&log->io_list_lock); 1411 spin_unlock_irq(&log->io_list_lock);
1395 1412
1396 BUG_ON(reclaimable < 0);
1397
1398 if (reclaimable == 0 || !write_super) 1413 if (reclaimable == 0 || !write_super)
1399 return; 1414 return;
1400 1415
@@ -2062,7 +2077,7 @@ static int
2062r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, 2077r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2063 struct r5l_recovery_ctx *ctx) 2078 struct r5l_recovery_ctx *ctx)
2064{ 2079{
2065 struct stripe_head *sh, *next; 2080 struct stripe_head *sh;
2066 struct mddev *mddev = log->rdev->mddev; 2081 struct mddev *mddev = log->rdev->mddev;
2067 struct page *page; 2082 struct page *page;
2068 sector_t next_checkpoint = MaxSector; 2083 sector_t next_checkpoint = MaxSector;
@@ -2076,7 +2091,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2076 2091
2077 WARN_ON(list_empty(&ctx->cached_list)); 2092 WARN_ON(list_empty(&ctx->cached_list));
2078 2093
2079 list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { 2094 list_for_each_entry(sh, &ctx->cached_list, lru) {
2080 struct r5l_meta_block *mb; 2095 struct r5l_meta_block *mb;
2081 int i; 2096 int i;
2082 int offset; 2097 int offset;
@@ -2126,14 +2141,39 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2126 ctx->pos = write_pos; 2141 ctx->pos = write_pos;
2127 ctx->seq += 1; 2142 ctx->seq += 1;
2128 next_checkpoint = sh->log_start; 2143 next_checkpoint = sh->log_start;
2129 list_del_init(&sh->lru);
2130 raid5_release_stripe(sh);
2131 } 2144 }
2132 log->next_checkpoint = next_checkpoint; 2145 log->next_checkpoint = next_checkpoint;
2133 __free_page(page); 2146 __free_page(page);
2134 return 0; 2147 return 0;
2135} 2148}
2136 2149
2150static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2151 struct r5l_recovery_ctx *ctx)
2152{
2153 struct mddev *mddev = log->rdev->mddev;
2154 struct r5conf *conf = mddev->private;
2155 struct stripe_head *sh, *next;
2156
2157 if (ctx->data_only_stripes == 0)
2158 return;
2159
2160 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2161
2162 list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2163 r5c_make_stripe_write_out(sh);
2164 set_bit(STRIPE_HANDLE, &sh->state);
2165 list_del_init(&sh->lru);
2166 raid5_release_stripe(sh);
2167 }
2168
2169 md_wakeup_thread(conf->mddev->thread);
2170 /* reuse conf->wait_for_quiescent in recovery */
2171 wait_event(conf->wait_for_quiescent,
2172 atomic_read(&conf->active_stripes) == 0);
2173
2174 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2175}
2176
2137static int r5l_recovery_log(struct r5l_log *log) 2177static int r5l_recovery_log(struct r5l_log *log)
2138{ 2178{
2139 struct mddev *mddev = log->rdev->mddev; 2179 struct mddev *mddev = log->rdev->mddev;
@@ -2160,32 +2200,31 @@ static int r5l_recovery_log(struct r5l_log *log)
2160 pos = ctx.pos; 2200 pos = ctx.pos;
2161 ctx.seq += 10000; 2201 ctx.seq += 10000;
2162 2202
2163 if (ctx.data_only_stripes == 0) {
2164 log->next_checkpoint = ctx.pos;
2165 r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
2166 ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
2167 }
2168 2203
2169 if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) 2204 if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
2170 pr_debug("md/raid:%s: starting from clean shutdown\n", 2205 pr_debug("md/raid:%s: starting from clean shutdown\n",
2171 mdname(mddev)); 2206 mdname(mddev));
2172 else { 2207 else
2173 pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", 2208 pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
2174 mdname(mddev), ctx.data_only_stripes, 2209 mdname(mddev), ctx.data_only_stripes,
2175 ctx.data_parity_stripes); 2210 ctx.data_parity_stripes);
2176 2211
2177 if (ctx.data_only_stripes > 0) 2212 if (ctx.data_only_stripes == 0) {
2178 if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) { 2213 log->next_checkpoint = ctx.pos;
2179 pr_err("md/raid:%s: failed to rewrite stripes to journal\n", 2214 r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
2180 mdname(mddev)); 2215 ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
2181 return -EIO; 2216 } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
2182 } 2217 pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
2218 mdname(mddev));
2219 return -EIO;
2183 } 2220 }
2184 2221
2185 log->log_start = ctx.pos; 2222 log->log_start = ctx.pos;
2186 log->seq = ctx.seq; 2223 log->seq = ctx.seq;
2187 log->last_checkpoint = pos; 2224 log->last_checkpoint = pos;
2188 r5l_write_super(log, pos); 2225 r5l_write_super(log, pos);
2226
2227 r5c_recovery_flush_data_only_stripes(log, &ctx);
2189 return 0; 2228 return 0;
2190} 2229}
2191 2230
@@ -2247,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2247 val > R5C_JOURNAL_MODE_WRITE_BACK) 2286 val > R5C_JOURNAL_MODE_WRITE_BACK)
2248 return -EINVAL; 2287 return -EINVAL;
2249 2288
2289 if (raid5_calc_degraded(conf) > 0 &&
2290 val == R5C_JOURNAL_MODE_WRITE_BACK)
2291 return -EINVAL;
2292
2250 mddev_suspend(mddev); 2293 mddev_suspend(mddev);
2251 conf->log->r5c_journal_mode = val; 2294 conf->log->r5c_journal_mode = val;
2252 mddev_resume(mddev); 2295 mddev_resume(mddev);
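
The new guard above refuses to switch a degraded array into write-back mode, since cached writes cannot be made safe once redundancy is lost. A userspace model of the store-time validation, with simplified stand-ins for the md structures:

#include <stdio.h>
#include <errno.h>

enum r5c_mode { WRITE_THROUGH = 0, WRITE_BACK = 1 };

static int journal_mode_store(int degraded_disks, int val, enum r5c_mode *mode)
{
    if (val < WRITE_THROUGH || val > WRITE_BACK)
        return -EINVAL;
    /* mirrors the added check: no write-back on a degraded array */
    if (degraded_disks > 0 && val == WRITE_BACK)
        return -EINVAL;
    *mode = val;
    return 0;
}

int main(void)
{
    enum r5c_mode mode = WRITE_THROUGH;

    printf("healthy:  %d\n", journal_mode_store(0, WRITE_BACK, &mode));
    printf("degraded: %d\n", journal_mode_store(1, WRITE_BACK, &mode));
    return 0;
}
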
@@ -2301,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf,
2301 set_bit(STRIPE_R5C_CACHING, &sh->state); 2344 set_bit(STRIPE_R5C_CACHING, &sh->state);
2302 } 2345 }
2303 2346
2347 /*
2348	 * When run in degraded mode, the array is set to write-through mode.
2349	 * This check helps drain pending writes safely in the transition to
2350	 * write-through mode.
2351 */
2352 if (s->failed) {
2353 r5c_make_stripe_write_out(sh);
2354 return -EAGAIN;
2355 }
2356
2304 for (i = disks; i--; ) { 2357 for (i = disks; i--; ) {
2305 dev = &sh->dev[i]; 2358 dev = &sh->dev[i];
2306 /* if non-overwrite, use writing-out phase */ 2359 /* if non-overwrite, use writing-out phase */
@@ -2351,6 +2404,8 @@ void r5c_release_extra_page(struct stripe_head *sh)
2351 struct page *p = sh->dev[i].orig_page; 2404 struct page *p = sh->dev[i].orig_page;
2352 2405
2353 sh->dev[i].orig_page = sh->dev[i].page; 2406 sh->dev[i].orig_page = sh->dev[i].page;
2407 clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2408
2354 if (!using_disk_info_extra_page) 2409 if (!using_disk_info_extra_page)
2355 put_page(p); 2410 put_page(p);
2356 } 2411 }
@@ -2555,6 +2610,19 @@ ioerr:
2555 return ret; 2610 return ret;
2556} 2611}
2557 2612
2613void r5c_update_on_rdev_error(struct mddev *mddev)
2614{
2615 struct r5conf *conf = mddev->private;
2616 struct r5l_log *log = conf->log;
2617
2618 if (!log)
2619 return;
2620
2621 if (raid5_calc_degraded(conf) > 0 &&
2622 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
2623 schedule_work(&log->disable_writeback_work);
2624}
2625
2558int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) 2626int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
2559{ 2627{
2560 struct request_queue *q = bdev_get_queue(rdev->bdev); 2628 struct request_queue *q = bdev_get_queue(rdev->bdev);
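
raid5_error() runs in a context where the journal mode cannot be changed synchronously, so r5c_update_on_rdev_error() only schedules disable_writeback_work; r5l_exit_log() later flushes it so teardown never races the worker. A rough userspace analogue of that schedule/flush shape, using a plain pthread in place of the kernel workqueue (this sketches the pattern only, not the workqueue API):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_t worker;
static bool scheduled;

static void *disable_writeback_work(void *arg)
{
    bool *write_back = arg;

    *write_back = false;    /* the slow part runs outside the error path */
    printf("write-back disabled\n");
    return NULL;
}

static void on_rdev_error(bool *write_back)
{
    if (*write_back && !scheduled) {    /* cheap check, no sleeping */
        scheduled = true;
        pthread_create(&worker, NULL, disable_writeback_work, write_back);
    }
}

int main(void)
{
    bool write_back = true;

    on_rdev_error(&write_back);     /* error path: just schedules */
    if (scheduled)                  /* exit path: like flush_work() */
        pthread_join(worker, NULL);
    return 0;
}
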
@@ -2627,6 +2695,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
2627 spin_lock_init(&log->no_space_stripes_lock); 2695 spin_lock_init(&log->no_space_stripes_lock);
2628 2696
2629 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); 2697 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
2698 INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
2630 2699
2631 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; 2700 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2632 INIT_LIST_HEAD(&log->stripe_in_journal_list); 2701 INIT_LIST_HEAD(&log->stripe_in_journal_list);
@@ -2659,6 +2728,7 @@ io_kc:
2659 2728
2660void r5l_exit_log(struct r5l_log *log) 2729void r5l_exit_log(struct r5l_log *log)
2661{ 2730{
2731 flush_work(&log->disable_writeback_work);
2662 md_unregister_thread(&log->reclaim_thread); 2732 md_unregister_thread(&log->reclaim_thread);
2663 mempool_destroy(log->meta_pool); 2733 mempool_destroy(log->meta_pool);
2664 bioset_free(log->bs); 2734 bioset_free(log->bs);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 36c13e4be9c9..3c7e106c12a2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -556,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
556 * of the two sections, and some non-in_sync devices may 556 * of the two sections, and some non-in_sync devices may
557 * be insync in the section most affected by failed devices. 557 * be insync in the section most affected by failed devices.
558 */ 558 */
559static int calc_degraded(struct r5conf *conf) 559int raid5_calc_degraded(struct r5conf *conf)
560{ 560{
561 int degraded, degraded2; 561 int degraded, degraded2;
562 int i; 562 int i;
@@ -619,7 +619,7 @@ static int has_failed(struct r5conf *conf)
619 if (conf->mddev->reshape_position == MaxSector) 619 if (conf->mddev->reshape_position == MaxSector)
620 return conf->mddev->degraded > conf->max_degraded; 620 return conf->mddev->degraded > conf->max_degraded;
621 621
622 degraded = calc_degraded(conf); 622 degraded = raid5_calc_degraded(conf);
623 if (degraded > conf->max_degraded) 623 if (degraded > conf->max_degraded)
624 return 1; 624 return 1;
625 return 0; 625 return 0;
@@ -1015,7 +1015,17 @@ again:
1015 1015
1016 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1016 if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1017 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1017 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1018 sh->dev[i].vec.bv_page = sh->dev[i].page; 1018
1019 if (!op_is_write(op) &&
1020 test_bit(R5_InJournal, &sh->dev[i].flags))
1021 /*
1022 * issuing a read for a page in the journal;
1023 * this must be preparing for prexor in rmw.
1024 * Read the data into orig_page.
1025 */
1026 sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
1027 else
1028 sh->dev[i].vec.bv_page = sh->dev[i].page;
1019 bi->bi_vcnt = 1; 1029 bi->bi_vcnt = 1;
1020 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1030 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1021 bi->bi_io_vec[0].bv_offset = 0; 1031 bi->bi_io_vec[0].bv_offset = 0;
@@ -2380,6 +2390,13 @@ static void raid5_end_read_request(struct bio * bi)
2380 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2390 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2381 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2391 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2382 2392
2393 if (test_bit(R5_InJournal, &sh->dev[i].flags))
2394 /*
2395 * end read for a page in journal, this
2396 * must be preparing for prexor in rmw
2397 */
2398 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2399
2383 if (atomic_read(&rdev->read_errors)) 2400 if (atomic_read(&rdev->read_errors))
2384 atomic_set(&rdev->read_errors, 0); 2401 atomic_set(&rdev->read_errors, 0);
2385 } else { 2402 } else {
@@ -2538,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2538 2555
2539 spin_lock_irqsave(&conf->device_lock, flags); 2556 spin_lock_irqsave(&conf->device_lock, flags);
2540 clear_bit(In_sync, &rdev->flags); 2557 clear_bit(In_sync, &rdev->flags);
2541 mddev->degraded = calc_degraded(conf); 2558 mddev->degraded = raid5_calc_degraded(conf);
2542 spin_unlock_irqrestore(&conf->device_lock, flags); 2559 spin_unlock_irqrestore(&conf->device_lock, flags);
2543 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2560 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2544 2561
@@ -2552,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2552 bdevname(rdev->bdev, b), 2569 bdevname(rdev->bdev, b),
2553 mdname(mddev), 2570 mdname(mddev),
2554 conf->raid_disks - mddev->degraded); 2571 conf->raid_disks - mddev->degraded);
2572 r5c_update_on_rdev_error(mddev);
2555} 2573}
2556 2574
2557/* 2575/*
@@ -2880,6 +2898,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
2880 return r_sector; 2898 return r_sector;
2881} 2899}
2882 2900
2901/*
2902 * There are cases where we want handle_stripe_dirtying() and
2903 * schedule_reconstruction() to delay towrite to some dev of a stripe.
2904 *
2905 * This function checks whether we want to delay the towrite. Specifically,
2906 * we delay the towrite when:
2907 *
2908 * 1. degraded stripe has a non-overwrite to the missing dev, AND this
2909 * stripe has data in journal (for other devices).
2910 *
2911 * In this case, when reading data for the non-overwrite dev, it is
2912 * necessary to handle complex rmw of write back cache (prexor with
2913 * orig_page, and xor with page). To keep read path simple, we would
2914 * like to flush data in journal to RAID disks first, so complex rmw
2915 * is handled in the write path (handle_stripe_dirtying).
2916 *
2917 */
2918static inline bool delay_towrite(struct r5dev *dev,
2919 struct stripe_head_state *s)
2920{
2921 return !test_bit(R5_OVERWRITE, &dev->flags) &&
2922 !test_bit(R5_Insync, &dev->flags) && s->injournal;
2923}
2924
2883static void 2925static void
2884schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 2926schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2885 int rcw, int expand) 2927 int rcw, int expand)
@@ -2900,7 +2942,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2900 for (i = disks; i--; ) { 2942 for (i = disks; i--; ) {
2901 struct r5dev *dev = &sh->dev[i]; 2943 struct r5dev *dev = &sh->dev[i];
2902 2944
2903 if (dev->towrite) { 2945 if (dev->towrite && !delay_towrite(dev, s)) {
2904 set_bit(R5_LOCKED, &dev->flags); 2946 set_bit(R5_LOCKED, &dev->flags);
2905 set_bit(R5_Wantdrain, &dev->flags); 2947 set_bit(R5_Wantdrain, &dev->flags);
2906 if (!expand) 2948 if (!expand)
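
delay_towrite() keys off exactly the three conditions named in the comment above. A compile-and-run sketch of the predicate, with plain booleans standing in for the kernel's atomic flag bits:

#include <stdio.h>
#include <stdbool.h>

struct dev_state {
    bool overwrite;   /* write covers the whole page (R5_OVERWRITE) */
    bool insync;      /* backing disk is usable (R5_Insync) */
};

static bool delay_towrite(const struct dev_state *dev, int injournal)
{
    /* delay a partial write to a failed device while the stripe
     * still has data sitting in the journal */
    return !dev->overwrite && !dev->insync && injournal > 0;
}

int main(void)
{
    struct dev_state failed_partial = { .overwrite = false, .insync = false };
    struct dev_state healthy        = { .overwrite = false, .insync = true };

    printf("%d\n", delay_towrite(&failed_partial, 1)); /* 1: delayed */
    printf("%d\n", delay_towrite(&healthy, 1));        /* 0: proceeds */
    return 0;
}
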
@@ -3295,13 +3337,6 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
3295 return rv; 3337 return rv;
3296} 3338}
3297 3339
3298/* fetch_block - checks the given member device to see if its data needs
3299 * to be read or computed to satisfy a request.
3300 *
3301 * Returns 1 when no more member devices need to be checked, otherwise returns
3302 * 0 to tell the loop in handle_stripe_fill to continue
3303 */
3304
3305static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, 3340static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3306 int disk_idx, int disks) 3341 int disk_idx, int disks)
3307{ 3342{
@@ -3392,6 +3427,12 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3392 return 0; 3427 return 0;
3393} 3428}
3394 3429
3430/* fetch_block - checks the given member device to see if its data needs
3431 * to be read or computed to satisfy a request.
3432 *
3433 * Returns 1 when no more member devices need to be checked, otherwise returns
3434 * 0 to tell the loop in handle_stripe_fill to continue
3435 */
3395static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 3436static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
3396 int disk_idx, int disks) 3437 int disk_idx, int disks)
3397{ 3438{
@@ -3478,10 +3519,26 @@ static void handle_stripe_fill(struct stripe_head *sh,
3478 * midst of changing due to a write 3519 * midst of changing due to a write
3479 */ 3520 */
3480 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 3521 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
3481 !sh->reconstruct_state) 3522 !sh->reconstruct_state) {
3523
3524 /*
3525 * For degraded stripe with data in journal, do not handle
3526 * read requests yet, instead, flush the stripe to raid
3527 * disks first, this avoids handling complex rmw of write
3528 * back cache (prexor with orig_page, and then xor with
3529 * page) in the read path
3530 */
3531 if (s->injournal && s->failed) {
3532 if (test_bit(STRIPE_R5C_CACHING, &sh->state))
3533 r5c_make_stripe_write_out(sh);
3534 goto out;
3535 }
3536
3482 for (i = disks; i--; ) 3537 for (i = disks; i--; )
3483 if (fetch_block(sh, s, i, disks)) 3538 if (fetch_block(sh, s, i, disks))
3484 break; 3539 break;
3540 }
3541out:
3485 set_bit(STRIPE_HANDLE, &sh->state); 3542 set_bit(STRIPE_HANDLE, &sh->state);
3486} 3543}
3487 3544
@@ -3594,6 +3651,21 @@ unhash:
3594 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); 3651 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
3595} 3652}
3596 3653
3654/*
3655 * For RMW in the write back cache, we need an extra page in prexor to
3656 * store the old data. This page is stored in dev->orig_page.
3657 *
3658 * This function checks whether we have data for prexor. The exact logic
3659 * is:
3660 * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE)
3661 */
3662static inline bool uptodate_for_rmw(struct r5dev *dev)
3663{
3664 return (test_bit(R5_UPTODATE, &dev->flags)) &&
3665 (!test_bit(R5_InJournal, &dev->flags) ||
3666 test_bit(R5_OrigPageUPTDODATE, &dev->flags));
3667}
3668
3597static int handle_stripe_dirtying(struct r5conf *conf, 3669static int handle_stripe_dirtying(struct r5conf *conf,
3598 struct stripe_head *sh, 3670 struct stripe_head *sh,
3599 struct stripe_head_state *s, 3671 struct stripe_head_state *s,
@@ -3622,12 +3694,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
3622 } else for (i = disks; i--; ) { 3694 } else for (i = disks; i--; ) {
3623 /* would I have to read this buffer for read_modify_write */ 3695 /* would I have to read this buffer for read_modify_write */
3624 struct r5dev *dev = &sh->dev[i]; 3696 struct r5dev *dev = &sh->dev[i];
3625 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx || 3697 if (((dev->towrite && !delay_towrite(dev, s)) ||
3698 i == sh->pd_idx || i == sh->qd_idx ||
3626 test_bit(R5_InJournal, &dev->flags)) && 3699 test_bit(R5_InJournal, &dev->flags)) &&
3627 !test_bit(R5_LOCKED, &dev->flags) && 3700 !test_bit(R5_LOCKED, &dev->flags) &&
3628 !((test_bit(R5_UPTODATE, &dev->flags) && 3701 !(uptodate_for_rmw(dev) ||
3629 (!test_bit(R5_InJournal, &dev->flags) ||
3630 dev->page != dev->orig_page)) ||
3631 test_bit(R5_Wantcompute, &dev->flags))) { 3702 test_bit(R5_Wantcompute, &dev->flags))) {
3632 if (test_bit(R5_Insync, &dev->flags)) 3703 if (test_bit(R5_Insync, &dev->flags))
3633 rmw++; 3704 rmw++;
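
uptodate_for_rmw() replaces the open-coded page/orig_page comparison in the rmw accounting above. A sketch of the predicate with the flag bits modeled as booleans:

#include <stdio.h>
#include <stdbool.h>

static bool uptodate_for_rmw(bool uptodate, bool in_journal,
                             bool orig_page_uptodate)
{
    /* data is usable for prexor when the page is up to date and, for
     * journaled devices, orig_page holds the on-disk data */
    return uptodate && (!in_journal || orig_page_uptodate);
}

int main(void)
{
    /* journaled device whose orig_page was never refreshed: must read */
    printf("%d\n", uptodate_for_rmw(true, true, false));  /* 0 */
    /* journaled device after the read path filled orig_page */
    printf("%d\n", uptodate_for_rmw(true, true, true));   /* 1 */
    return 0;
}
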
@@ -3639,7 +3710,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
3639 i != sh->pd_idx && i != sh->qd_idx && 3710 i != sh->pd_idx && i != sh->qd_idx &&
3640 !test_bit(R5_LOCKED, &dev->flags) && 3711 !test_bit(R5_LOCKED, &dev->flags) &&
3641 !(test_bit(R5_UPTODATE, &dev->flags) || 3712 !(test_bit(R5_UPTODATE, &dev->flags) ||
3642 test_bit(R5_InJournal, &dev->flags) ||
3643 test_bit(R5_Wantcompute, &dev->flags))) { 3713 test_bit(R5_Wantcompute, &dev->flags))) {
3644 if (test_bit(R5_Insync, &dev->flags)) 3714 if (test_bit(R5_Insync, &dev->flags))
3645 rcw++; 3715 rcw++;
@@ -3689,13 +3759,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
3689 3759
3690 for (i = disks; i--; ) { 3760 for (i = disks; i--; ) {
3691 struct r5dev *dev = &sh->dev[i]; 3761 struct r5dev *dev = &sh->dev[i];
3692 if ((dev->towrite || 3762 if (((dev->towrite && !delay_towrite(dev, s)) ||
3693 i == sh->pd_idx || i == sh->qd_idx || 3763 i == sh->pd_idx || i == sh->qd_idx ||
3694 test_bit(R5_InJournal, &dev->flags)) && 3764 test_bit(R5_InJournal, &dev->flags)) &&
3695 !test_bit(R5_LOCKED, &dev->flags) && 3765 !test_bit(R5_LOCKED, &dev->flags) &&
3696 !((test_bit(R5_UPTODATE, &dev->flags) && 3766 !(uptodate_for_rmw(dev) ||
3697 (!test_bit(R5_InJournal, &dev->flags) ||
3698 dev->page != dev->orig_page)) ||
3699 test_bit(R5_Wantcompute, &dev->flags)) && 3767 test_bit(R5_Wantcompute, &dev->flags)) &&
3700 test_bit(R5_Insync, &dev->flags)) { 3768 test_bit(R5_Insync, &dev->flags)) {
3701 if (test_bit(STRIPE_PREREAD_ACTIVE, 3769 if (test_bit(STRIPE_PREREAD_ACTIVE,
@@ -3722,7 +3790,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
3722 i != sh->pd_idx && i != sh->qd_idx && 3790 i != sh->pd_idx && i != sh->qd_idx &&
3723 !test_bit(R5_LOCKED, &dev->flags) && 3791 !test_bit(R5_LOCKED, &dev->flags) &&
3724 !(test_bit(R5_UPTODATE, &dev->flags) || 3792 !(test_bit(R5_UPTODATE, &dev->flags) ||
3725 test_bit(R5_InJournal, &dev->flags) ||
3726 test_bit(R5_Wantcompute, &dev->flags))) { 3793 test_bit(R5_Wantcompute, &dev->flags))) {
3727 rcw++; 3794 rcw++;
3728 if (test_bit(R5_Insync, &dev->flags) && 3795 if (test_bit(R5_Insync, &dev->flags) &&
@@ -7025,7 +7092,7 @@ static int raid5_run(struct mddev *mddev)
7025 /* 7092 /*
7026 * 0 for a fully functional array, 1 or 2 for a degraded array. 7093 * 0 for a fully functional array, 1 or 2 for a degraded array.
7027 */ 7094 */
7028 mddev->degraded = calc_degraded(conf); 7095 mddev->degraded = raid5_calc_degraded(conf);
7029 7096
7030 if (has_failed(conf)) { 7097 if (has_failed(conf)) {
7031 pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", 7098 pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
@@ -7272,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev)
7272 } 7339 }
7273 } 7340 }
7274 spin_lock_irqsave(&conf->device_lock, flags); 7341 spin_lock_irqsave(&conf->device_lock, flags);
7275 mddev->degraded = calc_degraded(conf); 7342 mddev->degraded = raid5_calc_degraded(conf);
7276 spin_unlock_irqrestore(&conf->device_lock, flags); 7343 spin_unlock_irqrestore(&conf->device_lock, flags);
7277 print_raid5_conf(conf); 7344 print_raid5_conf(conf);
7278 return count; 7345 return count;
@@ -7632,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev)
7632 * pre and post number of devices. 7699 * pre and post number of devices.
7633 */ 7700 */
7634 spin_lock_irqsave(&conf->device_lock, flags); 7701 spin_lock_irqsave(&conf->device_lock, flags);
7635 mddev->degraded = calc_degraded(conf); 7702 mddev->degraded = raid5_calc_degraded(conf);
7636 spin_unlock_irqrestore(&conf->device_lock, flags); 7703 spin_unlock_irqrestore(&conf->device_lock, flags);
7637 } 7704 }
7638 mddev->raid_disks = conf->raid_disks; 7705 mddev->raid_disks = conf->raid_disks;
@@ -7720,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
7720 } else { 7787 } else {
7721 int d; 7788 int d;
7722 spin_lock_irq(&conf->device_lock); 7789 spin_lock_irq(&conf->device_lock);
7723 mddev->degraded = calc_degraded(conf); 7790 mddev->degraded = raid5_calc_degraded(conf);
7724 spin_unlock_irq(&conf->device_lock); 7791 spin_unlock_irq(&conf->device_lock);
7725 for (d = conf->raid_disks ; 7792 for (d = conf->raid_disks ;
7726 d < conf->raid_disks - mddev->delta_disks; 7793 d < conf->raid_disks - mddev->delta_disks;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index ed8e1362ab36..1440fa26e296 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -322,6 +322,11 @@ enum r5dev_flags {
322 * data and parity being written are in the journal 322 * data and parity being written are in the journal
323 * device 323 * device
324 */ 324 */
325 R5_OrigPageUPTDODATE, /* with write back cache, we read old data into
326 * dev->orig_page for prexor. When this flag is
327 * set, orig_page contains latest data in the
328 * raid disk.
329 */
325}; 330};
326 331
327/* 332/*
@@ -753,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
753extern struct stripe_head * 758extern struct stripe_head *
754raid5_get_active_stripe(struct r5conf *conf, sector_t sector, 759raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
755 int previous, int noblock, int noquiesce); 760 int previous, int noblock, int noquiesce);
761extern int raid5_calc_degraded(struct r5conf *conf);
756extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); 762extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
757extern void r5l_exit_log(struct r5l_log *log); 763extern void r5l_exit_log(struct r5l_log *log);
758extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); 764extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
@@ -781,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
781extern void r5c_check_stripe_cache_usage(struct r5conf *conf); 787extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
782extern void r5c_check_cached_full_stripe(struct r5conf *conf); 788extern void r5c_check_cached_full_stripe(struct r5conf *conf);
783extern struct md_sysfs_entry r5c_journal_mode; 789extern struct md_sysfs_entry r5c_journal_mode;
790extern void r5c_update_on_rdev_error(struct mddev *mddev);
784#endif 791#endif
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 0ea4efb3de66..87a6b65ed3af 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -30,8 +30,9 @@
30 30
31#include "cec-priv.h" 31#include "cec-priv.h"
32 32
33static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx); 33static void cec_fill_msg_report_features(struct cec_adapter *adap,
34static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx); 34 struct cec_msg *msg,
35 unsigned int la_idx);
35 36
36/* 37/*
37 * 400 ms is the time it takes for one 16 byte message to be 38 * 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
288 289
289 /* Mark it as an error */ 290 /* Mark it as an error */
290 data->msg.tx_ts = ktime_get_ns(); 291 data->msg.tx_ts = ktime_get_ns();
291 data->msg.tx_status = CEC_TX_STATUS_ERROR | 292 data->msg.tx_status |= CEC_TX_STATUS_ERROR |
292 CEC_TX_STATUS_MAX_RETRIES; 293 CEC_TX_STATUS_MAX_RETRIES;
294 data->msg.tx_error_cnt++;
293 data->attempts = 0; 295 data->attempts = 0;
294 data->msg.tx_error_cnt = 1;
295 /* Queue transmitted message for monitoring purposes */ 296 /* Queue transmitted message for monitoring purposes */
296 cec_queue_msg_monitor(data->adap, &data->msg, 1); 297 cec_queue_msg_monitor(data->adap, &data->msg, 1);
297 298
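
cec_data_cancel() now ORs the error bits into tx_status and bumps tx_error_cnt instead of overwriting both, so status recorded by earlier transmit attempts survives the cancel. A compact model (the bit values below are illustrative, not the real CEC_TX_STATUS_* constants):

#include <stdio.h>
#include <stdint.h>

#define TX_STATUS_NACK        0x02  /* illustrative bit values */
#define TX_STATUS_ERROR       0x10
#define TX_STATUS_MAX_RETRIES 0x40

int main(void)
{
    uint32_t tx_status = TX_STATUS_NACK;    /* left by a failed attempt */
    unsigned tx_error_cnt = 1;

    /* cancel path: accumulate rather than clobber */
    tx_status |= TX_STATUS_ERROR | TX_STATUS_MAX_RETRIES;
    tx_error_cnt++;

    /* prints status=0x52 errors=2; the NACK bit is preserved */
    printf("status=0x%02x errors=%u\n", tx_status, tx_error_cnt);
    return 0;
}
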
@@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = {
851 [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED, 852 [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
852 [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED, 853 [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
853 [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST, 854 [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
854 [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST, 855 [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
855 [CEC_MSG_CDC_MESSAGE] = 2 | BCAST, 856 [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
856}; 857};
857 858
@@ -1205,7 +1206,7 @@ static int cec_config_thread_func(void *arg)
1205 las->log_addr[i] = CEC_LOG_ADDR_INVALID; 1206 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1206 if (last_la == CEC_LOG_ADDR_INVALID || 1207 if (last_la == CEC_LOG_ADDR_INVALID ||
1207 last_la == CEC_LOG_ADDR_UNREGISTERED || 1208 last_la == CEC_LOG_ADDR_UNREGISTERED ||
1208 !(last_la & type2mask[type])) 1209 !((1 << last_la) & type2mask[type]))
1209 last_la = la_list[0]; 1210 last_la = la_list[0];
1210 1211
1211 err = cec_config_log_addr(adap, i, last_la); 1212 err = cec_config_log_addr(adap, i, last_la);
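
The config-thread fix above corrects a bitmask membership test: type2mask[] holds one bit per logical address, so the check needs (1 << last_la), not the raw address value. A small demonstration with a made-up mask:

#include <stdio.h>

int main(void)
{
    unsigned type2mask = (1u << 4) | (1u << 8); /* hypothetical: LAs 4 and 8 */
    unsigned last_la = 8;

    /* buggy test: 8 & 0x110 == 0, so membership of LA 8 is missed */
    printf("buggy:   %d\n", !!(last_la & type2mask));
    /* correct test: is bit 'last_la' set in the mask? */
    printf("correct: %d\n", !!((1u << last_la) & type2mask));
    return 0;
}
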
@@ -1250,30 +1251,49 @@ configured:
1250 for (i = 1; i < las->num_log_addrs; i++) 1251 for (i = 1; i < las->num_log_addrs; i++)
1251 las->log_addr[i] = CEC_LOG_ADDR_INVALID; 1252 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1252 } 1253 }
1254 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1255 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1253 adap->is_configured = true; 1256 adap->is_configured = true;
1254 adap->is_configuring = false; 1257 adap->is_configuring = false;
1255 cec_post_state_event(adap); 1258 cec_post_state_event(adap);
1256 mutex_unlock(&adap->lock);
1257 1259
1260 /*
1261 * Now post the Report Features and Report Physical Address broadcast
1262 * messages. Note that these are non-blocking transmits, meaning that
1263 * they are just queued up and once adap->lock is unlocked the main
1264 * thread will kick in and start transmitting these.
1265 *
1266 * If after this function is done (but before one or more of these
1267 * messages are actually transmitted) the CEC adapter is unconfigured,
1268 * then any remaining messages will be dropped by the main thread.
1269 */
1258 for (i = 0; i < las->num_log_addrs; i++) { 1270 for (i = 0; i < las->num_log_addrs; i++) {
1271 struct cec_msg msg = {};
1272
1259 if (las->log_addr[i] == CEC_LOG_ADDR_INVALID || 1273 if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
1260 (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY)) 1274 (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
1261 continue; 1275 continue;
1262 1276
1263 /* 1277 msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
1264 * Report Features must come first according 1278
1265 * to CEC 2.0 1279 /* Report Features must come first according to CEC 2.0 */
1266 */ 1280 if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
1267 if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED) 1281 adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
1268 cec_report_features(adap, i); 1282 cec_fill_msg_report_features(adap, &msg, i);
1269 cec_report_phys_addr(adap, i); 1283 cec_transmit_msg_fh(adap, &msg, NULL, false);
1284 }
1285
1286 /* Report Physical Address */
1287 cec_msg_report_physical_addr(&msg, adap->phys_addr,
1288 las->primary_device_type[i]);
1289 dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
1290 las->log_addr[i],
1291 cec_phys_addr_exp(adap->phys_addr));
1292 cec_transmit_msg_fh(adap, &msg, NULL, false);
1270 } 1293 }
1271 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1272 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1273 mutex_lock(&adap->lock);
1274 adap->kthread_config = NULL; 1294 adap->kthread_config = NULL;
1275 mutex_unlock(&adap->lock);
1276 complete(&adap->config_completion); 1295 complete(&adap->config_completion);
1296 mutex_unlock(&adap->lock);
1277 return 0; 1297 return 0;
1278 1298
1279unconfigure: 1299unconfigure:
@@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
1526 1546
1527/* High-level core CEC message handling */ 1547/* High-level core CEC message handling */
1528 1548
1529/* Transmit the Report Features message */ 1549/* Fill in the Report Features message */
1530static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx) 1550static void cec_fill_msg_report_features(struct cec_adapter *adap,
1551 struct cec_msg *msg,
1552 unsigned int la_idx)
1531{ 1553{
1532 struct cec_msg msg = { };
1533 const struct cec_log_addrs *las = &adap->log_addrs; 1554 const struct cec_log_addrs *las = &adap->log_addrs;
1534 const u8 *features = las->features[la_idx]; 1555 const u8 *features = las->features[la_idx];
1535 bool op_is_dev_features = false; 1556 bool op_is_dev_features = false;
1536 unsigned int idx; 1557 unsigned int idx;
1537 1558
1538 /* This is 2.0 and up only */
1539 if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
1540 return 0;
1541
1542 /* Report Features */ 1559 /* Report Features */
1543 msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; 1560 msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1544 msg.len = 4; 1561 msg->len = 4;
1545 msg.msg[1] = CEC_MSG_REPORT_FEATURES; 1562 msg->msg[1] = CEC_MSG_REPORT_FEATURES;
1546 msg.msg[2] = adap->log_addrs.cec_version; 1563 msg->msg[2] = adap->log_addrs.cec_version;
1547 msg.msg[3] = las->all_device_types[la_idx]; 1564 msg->msg[3] = las->all_device_types[la_idx];
1548 1565
1549 /* Write RC Profiles first, then Device Features */ 1566 /* Write RC Profiles first, then Device Features */
1550 for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) { 1567 for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
1551 msg.msg[msg.len++] = features[idx]; 1568 msg->msg[msg->len++] = features[idx];
1552 if ((features[idx] & CEC_OP_FEAT_EXT) == 0) { 1569 if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
1553 if (op_is_dev_features) 1570 if (op_is_dev_features)
1554 break; 1571 break;
1555 op_is_dev_features = true; 1572 op_is_dev_features = true;
1556 } 1573 }
1557 } 1574 }
1558 return cec_transmit_msg(adap, &msg, false);
1559}
1560
1561/* Transmit the Report Physical Address message */
1562static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
1563{
1564 const struct cec_log_addrs *las = &adap->log_addrs;
1565 struct cec_msg msg = { };
1566
1567 /* Report Physical Address */
1568 msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1569 cec_msg_report_physical_addr(&msg, adap->phys_addr,
1570 las->primary_device_type[la_idx]);
1571 dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
1572 las->log_addr[la_idx],
1573 cec_phys_addr_exp(adap->phys_addr));
1574 return cec_transmit_msg(adap, &msg, false);
1575} 1575}
1576 1576
1577/* Transmit the Feature Abort message */ 1577/* Transmit the Feature Abort message */
@@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1777 } 1777 }
1778 1778
1779 case CEC_MSG_GIVE_FEATURES: 1779 case CEC_MSG_GIVE_FEATURES:
1780 if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) 1780 if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
1781 return cec_report_features(adap, la_idx); 1781 return cec_feature_abort(adap, msg);
1782 return 0; 1782 cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
1783 return cec_transmit_msg(adap, &tx_cec_msg, false);
1783 1784
1784 default: 1785 default:
1785 /* 1786 /*
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index bc5e8cfe7ca2..8f11d7e45993 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
719 skb_copy_from_linear_data(h->priv->ule_skb, dest_addr, 719 skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
720 ETH_ALEN); 720 ETH_ALEN);
721 skb_pull(h->priv->ule_skb, ETH_ALEN); 721 skb_pull(h->priv->ule_skb, ETH_ALEN);
722 } else {
723 /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
724 eth_zero_addr(dest_addr);
722 } 725 }
723 726
724 /* Handle ULE Extension Headers. */ 727 /* Handle ULE Extension Headers. */
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
750 if (!h->priv->ule_bridged) { 753 if (!h->priv->ule_bridged) {
751 skb_push(h->priv->ule_skb, ETH_HLEN); 754 skb_push(h->priv->ule_skb, ETH_HLEN);
752 h->ethh = (struct ethhdr *)h->priv->ule_skb->data; 755 h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
753 if (!h->priv->ule_dbit) { 756 memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
754 /* 757 eth_zero_addr(h->ethh->h_source);
755 * dest_addr buffer is only valid if
756 * h->priv->ule_dbit == 0
757 */
758 memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
759 eth_zero_addr(h->ethh->h_source);
760 } else /* zeroize source and dest */
761 memset(h->ethh, 0, ETH_ALEN * 2);
762
763 h->ethh->h_proto = htons(h->priv->ule_sndu_type); 758 h->ethh->h_proto = htons(h->priv->ule_sndu_type);
764 } 759 }
765 /* else: skb is in correct state; nothing to do. */ 760 /* else: skb is in correct state; nothing to do. */
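
The dest_addr buffer is only filled from the SNDU when the D-bit is clear, so the new else branch zeroes it before it is later copied into the synthesized Ethernet header; previously the header could pick up uninitialized bytes. A simplified model:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
    unsigned char dest_addr[ETH_ALEN];  /* deliberately uninitialized */
    int ule_dbit = 1;                   /* destination omitted from SNDU */

    if (!ule_dbit)
        memcpy(dest_addr, "\x00\x11\x22\x33\x44\x55", ETH_ALEN);
    else
        memset(dest_addr, 0, ETH_ALEN); /* the branch the fix adds */

    for (int i = 0; i < ETH_ALEN; i++)
        printf("%02x%s", dest_addr[i], i < ETH_ALEN - 1 ? ":" : "\n");
    return 0;
}
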
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index b31fa6fae009..b979ea148251 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
655config VIDEO_S5K4ECGX 655config VIDEO_S5K4ECGX
656 tristate "Samsung S5K4ECGX sensor support" 656 tristate "Samsung S5K4ECGX sensor support"
657 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 657 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
658 select CRC32
658 ---help--- 659 ---help---
659 This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M 660 This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
660 camera sensor with an embedded SoC image signal processor. 661 camera sensor with an embedded SoC image signal processor.
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 59872b31f832..f4e92bdfe192 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
2741 * I2C Driver 2741 * I2C Driver
2742 */ 2742 */
2743 2743
2744#ifdef CONFIG_PM 2744static int __maybe_unused smiapp_suspend(struct device *dev)
2745
2746static int smiapp_suspend(struct device *dev)
2747{ 2745{
2748 struct i2c_client *client = to_i2c_client(dev); 2746 struct i2c_client *client = to_i2c_client(dev);
2749 struct v4l2_subdev *subdev = i2c_get_clientdata(client); 2747 struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
2768 return 0; 2766 return 0;
2769} 2767}
2770 2768
2771static int smiapp_resume(struct device *dev) 2769static int __maybe_unused smiapp_resume(struct device *dev)
2772{ 2770{
2773 struct i2c_client *client = to_i2c_client(dev); 2771 struct i2c_client *client = to_i2c_client(dev);
2774 struct v4l2_subdev *subdev = i2c_get_clientdata(client); 2772 struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
2783 return rval; 2781 return rval;
2784} 2782}
2785 2783
2786#else
2787
2788#define smiapp_suspend NULL
2789#define smiapp_resume NULL
2790
2791#endif /* CONFIG_PM */
2792
2793static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev) 2784static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
2794{ 2785{
2795 struct smiapp_hwconfig *hwcfg; 2786 struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
2913 if (IS_ERR(sensor->xshutdown)) 2904 if (IS_ERR(sensor->xshutdown))
2914 return PTR_ERR(sensor->xshutdown); 2905 return PTR_ERR(sensor->xshutdown);
2915 2906
2916 pm_runtime_enable(&client->dev); 2907 rval = smiapp_power_on(&client->dev);
2917 2908 if (rval < 0)
2918 rval = pm_runtime_get_sync(&client->dev); 2909 return rval;
2919 if (rval < 0) {
2920 rval = -ENODEV;
2921 goto out_power_off;
2922 }
2923 2910
2924 rval = smiapp_identify_module(sensor); 2911 rval = smiapp_identify_module(sensor);
2925 if (rval) { 2912 if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
3100 if (rval < 0) 3087 if (rval < 0)
3101 goto out_media_entity_cleanup; 3088 goto out_media_entity_cleanup;
3102 3089
3090 pm_runtime_set_active(&client->dev);
3091 pm_runtime_get_noresume(&client->dev);
3092 pm_runtime_enable(&client->dev);
3103 pm_runtime_set_autosuspend_delay(&client->dev, 1000); 3093 pm_runtime_set_autosuspend_delay(&client->dev, 1000);
3104 pm_runtime_use_autosuspend(&client->dev); 3094 pm_runtime_use_autosuspend(&client->dev);
3105 pm_runtime_put_autosuspend(&client->dev); 3095 pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
3113 smiapp_cleanup(sensor); 3103 smiapp_cleanup(sensor);
3114 3104
3115out_power_off: 3105out_power_off:
3116 pm_runtime_put(&client->dev); 3106 smiapp_power_off(&client->dev);
3117 pm_runtime_disable(&client->dev);
3118 3107
3119 return rval; 3108 return rval;
3120} 3109}
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
3127 3116
3128 v4l2_async_unregister_subdev(subdev); 3117 v4l2_async_unregister_subdev(subdev);
3129 3118
3130 pm_runtime_suspend(&client->dev);
3131 pm_runtime_disable(&client->dev); 3119 pm_runtime_disable(&client->dev);
3120 if (!pm_runtime_status_suspended(&client->dev))
3121 smiapp_power_off(&client->dev);
3122 pm_runtime_set_suspended(&client->dev);
3132 3123
3133 for (i = 0; i < sensor->ssds_used; i++) { 3124 for (i = 0; i < sensor->ssds_used; i++) {
3134 v4l2_device_unregister_subdev(&sensor->ssds[i].sd); 3125 v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 3a0fe8cc64e9..48646a7f3fb0 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
291 tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode); 291 tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
292 tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input); 292 tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
293 293
294 /* Svideo should enable YCrCb output and disable GPCL output 294 /*
295 * For Composite and TV, it should be the reverse 295 * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
296 * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
297 * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
298 * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
299 * INTREQ/GPCL/VBLK to logic 1.
296 */ 300 */
297 val = tvp5150_read(sd, TVP5150_MISC_CTL); 301 val = tvp5150_read(sd, TVP5150_MISC_CTL);
298 if (val < 0) { 302 if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
301 } 305 }
302 306
303 if (decoder->input == TVP5150_SVIDEO) 307 if (decoder->input == TVP5150_SVIDEO)
304 val = (val & ~0x40) | 0x10; 308 val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
305 else 309 else
306 val = (val & ~0x10) | 0x40; 310 val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
307 tvp5150_write(sd, TVP5150_MISC_CTL, val); 311 tvp5150_write(sd, TVP5150_MISC_CTL, val);
308}; 312};
309 313
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
455 },{ /* Automatic offset and AGC enabled */ 459 },{ /* Automatic offset and AGC enabled */
456 TVP5150_ANAL_CHL_CTL, 0x15 460 TVP5150_ANAL_CHL_CTL, 0x15
457 },{ /* Activate YCrCb output 0x9 or 0xd ? */ 461 },{ /* Activate YCrCb output 0x9 or 0xd ? */
458 TVP5150_MISC_CTL, 0x6f 462 TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
463 TVP5150_MISC_CTL_INTREQ_OE |
464 TVP5150_MISC_CTL_YCBCR_OE |
465 TVP5150_MISC_CTL_SYNC_OE |
466 TVP5150_MISC_CTL_VBLANK |
467 TVP5150_MISC_CTL_CLOCK_OE,
459 },{ /* Activates video std autodetection for all standards */ 468 },{ /* Activates video std autodetection for all standards */
460 TVP5150_AUTOSW_MSK, 0x0 469 TVP5150_AUTOSW_MSK, 0x0
461 },{ /* Default format: 0x47. For 4:2:2: 0x40 */ 470 },{ /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
861 870
862 f = &format->format; 871 f = &format->format;
863 872
864 tvp5150_reset(sd, 0);
865
866 f->width = decoder->rect.width; 873 f->width = decoder->rect.width;
867 f->height = decoder->rect.height / 2; 874 f->height = decoder->rect.height / 2;
868 875
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
1051static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable) 1058static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
1052{ 1059{
1053 struct tvp5150 *decoder = to_tvp5150(sd); 1060 struct tvp5150 *decoder = to_tvp5150(sd);
1054 /* Output format: 8-bit ITU-R BT.656 with embedded syncs */ 1061 int val;
1055 int val = 0x09;
1056
1057 /* Output format: 8-bit 4:2:2 YUV with discrete sync */
1058 if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
1059 val = 0x0d;
1060 1062
1061 /* Initializes TVP5150 to its default values */ 1063 /* Enable or disable the video output signals. */
1062 /* # set PCLK (27MHz) */ 1064 val = tvp5150_read(sd, TVP5150_MISC_CTL);
1063 tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00); 1065 if (val < 0)
1066 return val;
1067
1068 val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
1069 TVP5150_MISC_CTL_CLOCK_OE);
1070
1071 if (enable) {
1072 /*
1073 * Enable the YCbCr and clock outputs. In discrete sync mode
1074 * (non-BT.656) additionally enable the sync outputs.
1075 */
1076 val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
1077 if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
1078 val |= TVP5150_MISC_CTL_SYNC_OE;
1079 }
1064 1080
1065 if (enable) 1081 tvp5150_write(sd, TVP5150_MISC_CTL, val);
1066 tvp5150_write(sd, TVP5150_MISC_CTL, val);
1067 else
1068 tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
1069 1082
1070 return 0; 1083 return 0;
1071} 1084}
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
1524 res = core->hdl.error; 1537 res = core->hdl.error;
1525 goto err; 1538 goto err;
1526 } 1539 }
1527 v4l2_ctrl_handler_setup(&core->hdl);
1528 1540
1529 /* Default is no cropping */ 1541 /* Default is no cropping */
1530 core->rect.top = 0; 1542 core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
1535 core->rect.left = 0; 1547 core->rect.left = 0;
1536 core->rect.width = TVP5150_H_MAX; 1548 core->rect.width = TVP5150_H_MAX;
1537 1549
1550 tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */
1551
1538 res = v4l2_async_register_subdev(sd); 1552 res = v4l2_async_register_subdev(sd);
1539 if (res < 0) 1553 if (res < 0)
1540 goto err; 1554 goto err;
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 25a994944918..30a48c28d05a 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -9,6 +9,15 @@
9#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */ 9#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */
10#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */ 10#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */
11#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */ 11#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */
12#define TVP5150_MISC_CTL_VBLK_GPCL BIT(7)
13#define TVP5150_MISC_CTL_GPCL BIT(6)
14#define TVP5150_MISC_CTL_INTREQ_OE BIT(5)
15#define TVP5150_MISC_CTL_HVLK BIT(4)
16#define TVP5150_MISC_CTL_YCBCR_OE BIT(3)
17#define TVP5150_MISC_CTL_SYNC_OE BIT(2)
18#define TVP5150_MISC_CTL_VBLANK BIT(1)
19#define TVP5150_MISC_CTL_CLOCK_OE BIT(0)
20
12#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */ 21#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
13 22
14/* Reserved 05h */ 23/* Reserved 05h */
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 979634000597..d5c911c09e2b 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
308static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev) 308static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
309{ 309{
310 free_irq(pci_dev->irq, (void *)cobalt); 310 free_irq(pci_dev->irq, (void *)cobalt);
311 311 pci_free_irq_vectors(pci_dev);
312 if (cobalt->msi_enabled)
313 pci_disable_msi(pci_dev);
314} 312}
315 313
316static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev, 314static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
387 from being generated. */ 385 from being generated. */
388 cobalt_set_interrupt(cobalt, false); 386 cobalt_set_interrupt(cobalt, false);
389 387
390 if (pci_enable_msi_range(pci_dev, 1, 1) < 1) { 388 if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
391 cobalt_err("Could not enable MSI\n"); 389 cobalt_err("Could not enable MSI\n");
392 cobalt->msi_enabled = false;
393 ret = -EIO; 390 ret = -EIO;
394 goto err_release; 391 goto err_release;
395 } 392 }
396 msi_config_show(cobalt, pci_dev); 393 msi_config_show(cobalt, pci_dev);
397 cobalt->msi_enabled = true;
398 394
399 /* Register IRQ */ 395 /* Register IRQ */
400 if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED, 396 if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index ed00dc9d9399..00f773ec359a 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -287,8 +287,6 @@ struct cobalt {
287 u32 irq_none; 287 u32 irq_none;
288 u32 irq_full_fifo; 288 u32 irq_full_fifo;
289 289
290 bool msi_enabled;
291
292 /* omnitek dma */ 290 /* omnitek dma */
293 int dma_channels; 291 int dma_channels;
294 int first_fifo_channel; 292 int first_fifo_channel;
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 07fa08be9e99..d54ebe7e0215 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,14 +97,13 @@ struct pctv452e_state {
97 u8 c; /* transaction counter, wraps around... */ 97 u8 c; /* transaction counter, wraps around... */
98 u8 initialized; /* set to 1 if 0x15 has been sent */ 98 u8 initialized; /* set to 1 if 0x15 has been sent */
99 u16 last_rc_key; 99 u16 last_rc_key;
100
101 unsigned char data[80];
102}; 100};
103 101
104static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, 102static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
105 unsigned int write_len, unsigned int read_len) 103 unsigned int write_len, unsigned int read_len)
106{ 104{
107 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 105 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
106 u8 *buf;
108 u8 id; 107 u8 id;
109 unsigned int rlen; 108 unsigned int rlen;
110 int ret; 109 int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
114 return -EIO; 113 return -EIO;
115 } 114 }
116 115
117 mutex_lock(&state->ca_mutex); 116 buf = kmalloc(64, GFP_KERNEL);
117 if (!buf)
118 return -ENOMEM;
119
118 id = state->c++; 120 id = state->c++;
119 121
120 state->data[0] = SYNC_BYTE_OUT; 122 buf[0] = SYNC_BYTE_OUT;
121 state->data[1] = id; 123 buf[1] = id;
122 state->data[2] = cmd; 124 buf[2] = cmd;
123 state->data[3] = write_len; 125 buf[3] = write_len;
124 126
125 memcpy(state->data + 4, data, write_len); 127 memcpy(buf + 4, data, write_len);
126 128
127 rlen = (read_len > 0) ? 64 : 0; 129 rlen = (read_len > 0) ? 64 : 0;
128 ret = dvb_usb_generic_rw(d, state->data, 4 + write_len, 130 ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
129 state->data, rlen, /* delay_ms */ 0); 131 buf, rlen, /* delay_ms */ 0);
130 if (0 != ret) 132 if (0 != ret)
131 goto failed; 133 goto failed;
132 134
133 ret = -EIO; 135 ret = -EIO;
134 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) 136 if (SYNC_BYTE_IN != buf[0] || id != buf[1])
135 goto failed; 137 goto failed;
136 138
137 memcpy(data, state->data + 4, read_len); 139 memcpy(data, buf + 4, read_len);
138 140
139 mutex_unlock(&state->ca_mutex); 141 kfree(buf);
140 return 0; 142 return 0;
141 143
142failed: 144failed:
143 err("CI error %d; %02X %02X %02X -> %*ph.", 145 err("CI error %d; %02X %02X %02X -> %*ph.",
144 ret, SYNC_BYTE_OUT, id, cmd, 3, state->data); 146 ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
145 147
146 mutex_unlock(&state->ca_mutex); 148 kfree(buf);
147 return ret; 149 return ret;
148} 150}
149 151
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
410 u8 *rcv_buf, u8 rcv_len) 412 u8 *rcv_buf, u8 rcv_len)
411{ 413{
412 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 414 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
415 u8 *buf;
413 u8 id; 416 u8 id;
414 int ret; 417 int ret;
415 418
416 mutex_lock(&state->ca_mutex); 419 buf = kmalloc(64, GFP_KERNEL);
420 if (!buf)
421 return -ENOMEM;
422
417 id = state->c++; 423 id = state->c++;
418 424
419 ret = -EINVAL; 425 ret = -EINVAL;
420 if (snd_len > 64 - 7 || rcv_len > 64 - 7) 426 if (snd_len > 64 - 7 || rcv_len > 64 - 7)
421 goto failed; 427 goto failed;
422 428
423 state->data[0] = SYNC_BYTE_OUT; 429 buf[0] = SYNC_BYTE_OUT;
424 state->data[1] = id; 430 buf[1] = id;
425 state->data[2] = PCTV_CMD_I2C; 431 buf[2] = PCTV_CMD_I2C;
426 state->data[3] = snd_len + 3; 432 buf[3] = snd_len + 3;
427 state->data[4] = addr << 1; 433 buf[4] = addr << 1;
428 state->data[5] = snd_len; 434 buf[5] = snd_len;
429 state->data[6] = rcv_len; 435 buf[6] = rcv_len;
430 436
431 memcpy(state->data + 7, snd_buf, snd_len); 437 memcpy(buf + 7, snd_buf, snd_len);
432 438
433 ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len, 439 ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
434 state->data, /* rcv_len */ 64, 440 buf, /* rcv_len */ 64,
435 /* delay_ms */ 0); 441 /* delay_ms */ 0);
436 if (ret < 0) 442 if (ret < 0)
437 goto failed; 443 goto failed;
438 444
439 /* TT USB protocol error. */ 445 /* TT USB protocol error. */
440 ret = -EIO; 446 ret = -EIO;
441 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) 447 if (SYNC_BYTE_IN != buf[0] || id != buf[1])
442 goto failed; 448 goto failed;
443 449
444 /* I2C device didn't respond as expected. */ 450 /* I2C device didn't respond as expected. */
445 ret = -EREMOTEIO; 451 ret = -EREMOTEIO;
446 if (state->data[5] < snd_len || state->data[6] < rcv_len) 452 if (buf[5] < snd_len || buf[6] < rcv_len)
447 goto failed; 453 goto failed;
448 454
449 memcpy(rcv_buf, state->data + 7, rcv_len); 455 memcpy(rcv_buf, buf + 7, rcv_len);
450 mutex_unlock(&state->ca_mutex);
451 456
457 kfree(buf);
452 return rcv_len; 458 return rcv_len;
453 459
454failed: 460failed:
455 err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph", 461 err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
456 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, 462 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
457 7, state->data); 463 7, buf);
458 464
459 mutex_unlock(&state->ca_mutex); 465 kfree(buf);
460 return ret; 466 return ret;
461} 467}
462 468
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
505static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) 511static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
506{ 512{
507 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 513 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
508 u8 *rx; 514 u8 *b0, *rx;
509 int ret; 515 int ret;
510 516
511 info("%s: %d\n", __func__, i); 517 info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
516 if (state->initialized) 522 if (state->initialized)
517 return 0; 523 return 0;
518 524
519 rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL); 525 b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
520 if (!rx) 526 if (!b0)
521 return -ENOMEM; 527 return -ENOMEM;
522 528
523 mutex_lock(&state->ca_mutex); 529 rx = b0 + 5;
530
524 /* hmm where shoud this should go? */ 531 /* hmm where shoud this should go? */
525 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); 532 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
526 if (ret != 0) 533 if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
528 __func__, ret); 535 __func__, ret);
529 536
530 /* this is a one-time initialization, dont know where to put */ 537 /* this is a one-time initialization, dont know where to put */
531 state->data[0] = 0xaa; 538 b0[0] = 0xaa;
532 state->data[1] = state->c++; 539 b0[1] = state->c++;
533 state->data[2] = PCTV_CMD_RESET; 540 b0[2] = PCTV_CMD_RESET;
534 state->data[3] = 1; 541 b0[3] = 1;
535 state->data[4] = 0; 542 b0[4] = 0;
536 /* reset board */ 543 /* reset board */
537 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); 544 ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
538 if (ret) 545 if (ret)
539 goto ret; 546 goto ret;
540 547
541 state->data[1] = state->c++; 548 b0[1] = state->c++;
542 state->data[4] = 1; 549 b0[4] = 1;
543 /* reset board (again?) */ 550 /* reset board (again?) */
544 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); 551 ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
545 if (ret) 552 if (ret)
546 goto ret; 553 goto ret;
547 554
548 state->initialized = 1; 555 state->initialized = 1;
549 556
550ret: 557ret:
551 mutex_unlock(&state->ca_mutex); 558 kfree(b0);
552 kfree(rx);
553 return ret; 559 return ret;
554} 560}
555 561
556static int pctv452e_rc_query(struct dvb_usb_device *d) 562static int pctv452e_rc_query(struct dvb_usb_device *d)
557{ 563{
558 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 564 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
565 u8 *b, *rx;
559 int ret, i; 566 int ret, i;
560 u8 id; 567 u8 id;
561 568
562 mutex_lock(&state->ca_mutex); 569 b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
570 if (!b)
571 return -ENOMEM;
572
573 rx = b + CMD_BUFFER_SIZE;
574
563 id = state->c++; 575 id = state->c++;
564 576
565 /* prepare command header */ 577 /* prepare command header */
566 state->data[0] = SYNC_BYTE_OUT; 578 b[0] = SYNC_BYTE_OUT;
567 state->data[1] = id; 579 b[1] = id;
568 state->data[2] = PCTV_CMD_IR; 580 b[2] = PCTV_CMD_IR;
569 state->data[3] = 0; 581 b[3] = 0;
570 582
571 /* send ir request */ 583 /* send ir request */
572 ret = dvb_usb_generic_rw(d, state->data, 4, 584 ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
573 state->data, PCTV_ANSWER_LEN, 0);
574 if (ret != 0) 585 if (ret != 0)
575 goto ret; 586 goto ret;
576 587
577 if (debug > 3) { 588 if (debug > 3) {
578 info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data); 589 info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
579 for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++) 590 for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
580 info(" %02x", state->data[i + 3]); 591 info(" %02x", rx[i+3]);
581 592
582 info("\n"); 593 info("\n");
583 } 594 }
584 595
585 if ((state->data[3] == 9) && (state->data[12] & 0x01)) { 596 if ((rx[3] == 9) && (rx[12] & 0x01)) {
586 /* got a "press" event */ 597 /* got a "press" event */
587 state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]); 598 state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
588 if (debug > 2) 599 if (debug > 2)
589 info("%s: cmd=0x%02x sys=0x%02x\n", 600 info("%s: cmd=0x%02x sys=0x%02x\n",
590 __func__, state->data[6], state->data[7]); 601 __func__, rx[6], rx[7]);
591 602
592 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); 603 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
593 } else if (state->last_rc_key) { 604 } else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
595 state->last_rc_key = 0; 606 state->last_rc_key = 0;
596 } 607 }
597ret: 608ret:
598 mutex_unlock(&state->ca_mutex); 609 kfree(b);
599 return ret; 610 return ret;
600} 611}
601 612
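
Across all three pctv452e hunks the pattern is the same: the shared, mutex-guarded state->data[] buffer embedded in the device state is replaced by a per-call heap allocation, which removes the ca_mutex serialization and gives the USB layer a buffer it can safely DMA to. A userspace sketch of the resulting allocate/use/free shape, with transfer() as a stand-in for dvb_usb_generic_rw():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

static int transfer(unsigned char *buf, size_t len)
{
    (void)len;
    buf[0] = 0x55;  /* pretend the device echoed a sync byte */
    return 0;
}

static int ci_msg(const unsigned char *data, size_t write_len)
{
    unsigned char *buf;
    int ret;

    buf = malloc(64);           /* fresh buffer per call, no lock needed */
    if (!buf)
        return -ENOMEM;

    memcpy(buf + 4, data, write_len);
    ret = transfer(buf, 4 + write_len);
    if (ret)
        goto failed;
    if (buf[0] != 0x55) {       /* validate the reply header */
        ret = -EIO;
        goto failed;
    }
    free(buf);
    return 0;

failed:
    free(buf);                  /* single cleanup path, kernel style */
    return ret;
}

int main(void)
{
    printf("%d\n", ci_msg((const unsigned char *)"ab", 2));
    return 0;
}
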
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index a0547dbf9806..76382c858c35 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
330 struct ms_id_register id_reg; 330 struct ms_id_register id_reg;
331 331
332 if (!(*mrq)) { 332 if (!(*mrq)) {
333 memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL, 333 memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
334 sizeof(struct ms_id_register)); 334 sizeof(struct ms_id_register));
335 *mrq = &card->current_mrq; 335 *mrq = &card->current_mrq;
336 return 0; 336 return 0;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index b44306b886cb..73db08558e4d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)
3354 3354
3355 if (!slot) 3355 if (!slot)
3356 continue; 3356 continue;
3357 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { 3357 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3358 dw_mci_set_ios(slot->mmc, &slot->mmc->ios); 3358 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3359 dw_mci_setup_bus(slot, true); 3359
3360 } 3360 /* Force setup bus to guarantee available clock output */
3361 dw_mci_setup_bus(slot, true);
3361 } 3362 }
3362 3363
3363 /* Now that slots are all setup, we can enable card detect */ 3364 /* Now that slots are all setup, we can enable card detect */
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 23909804ffb8..0def99590d16 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
2733 if (intmask & SDHCI_INT_RETUNE) 2733 if (intmask & SDHCI_INT_RETUNE)
2734 mmc_retune_needed(host->mmc); 2734 mmc_retune_needed(host->mmc);
2735 2735
2736 if (intmask & SDHCI_INT_CARD_INT) { 2736 if ((intmask & SDHCI_INT_CARD_INT) &&
2737 (host->ier & SDHCI_INT_CARD_INT)) {
2737 sdhci_enable_sdio_irq_nolock(host, false); 2738 sdhci_enable_sdio_irq_nolock(host, false);
2738 host->thread_isr |= SDHCI_INT_CARD_INT; 2739 host->thread_isr |= SDHCI_INT_CARD_INT;
2739 result = IRQ_WAKE_THREAD; 2740 result = IRQ_WAKE_THREAD;
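
The sdhci fix only wakes the SDIO thread when the card interrupt is both pending in the status register and currently enabled in host->ier, since the status bit can latch while the interrupt is masked. Modeled as a predicate (the bit value is illustrative):

#include <stdio.h>
#include <stdint.h>

#define INT_CARD_INT 0x0100     /* illustrative bit position */

static int should_wake_thread(uint32_t intmask, uint32_t ier)
{
    /* act only on interrupts that are pending AND enabled */
    return (intmask & INT_CARD_INT) && (ier & INT_CARD_INT);
}

int main(void)
{
    printf("%d\n", should_wake_thread(INT_CARD_INT, INT_CARD_INT)); /* 1 */
    printf("%d\n", should_wake_thread(INT_CARD_INT, 0));    /* 0: masked */
    return 0;
}
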
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 7be393c96b1a..cf7c18947189 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
161 161
162 dev->irq = pdev->irq; 162 dev->irq = pdev->irq;
163 priv->base = addr; 163 priv->base = addr;
164 priv->device = &pdev->dev;
164 165
165 if (!c_can_pci_data->freq) { 166 if (!c_can_pci_data->freq) {
166 dev_err(&pdev->dev, "no clock frequency defined\n"); 167 dev_err(&pdev->dev, "no clock frequency defined\n");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 680d1ff07a55..6749b1829469 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
948 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, 948 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
949 HECC_DEF_NAPI_WEIGHT); 949 HECC_DEF_NAPI_WEIGHT);
950 950
951 clk_enable(priv->clk); 951 err = clk_prepare_enable(priv->clk);
952 if (err) {
953 dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
954 goto probe_exit_clk;
955 }
956
952 err = register_candev(ndev); 957 err = register_candev(ndev);
953 if (err) { 958 if (err) {
954 dev_err(&pdev->dev, "register_candev() failed\n"); 959 dev_err(&pdev->dev, "register_candev() failed\n");
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
981 struct ti_hecc_priv *priv = netdev_priv(ndev); 986 struct ti_hecc_priv *priv = netdev_priv(ndev);
982 987
983 unregister_candev(ndev); 988 unregister_candev(ndev);
984 clk_disable(priv->clk); 989 clk_disable_unprepare(priv->clk);
985 clk_put(priv->clk); 990 clk_put(priv->clk);
986 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 991 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
987 iounmap(priv->base); 992 iounmap(priv->base);
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
1006 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR); 1011 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1007 priv->can.state = CAN_STATE_SLEEPING; 1012 priv->can.state = CAN_STATE_SLEEPING;
1008 1013
1009 clk_disable(priv->clk); 1014 clk_disable_unprepare(priv->clk);
1010 1015
1011 return 0; 1016 return 0;
1012} 1017}
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
1015{ 1020{
1016 struct net_device *dev = platform_get_drvdata(pdev); 1021 struct net_device *dev = platform_get_drvdata(pdev);
1017 struct ti_hecc_priv *priv = netdev_priv(dev); 1022 struct ti_hecc_priv *priv = netdev_priv(dev);
1023 int err;
1018 1024
1019 clk_enable(priv->clk); 1025 err = clk_prepare_enable(priv->clk);
1026 if (err)
1027 return err;
1020 1028
1021 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR); 1029 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1022 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1030 priv->can.state = CAN_STATE_ERROR_ACTIVE;
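
The ti_hecc conversion follows the common-clock-framework contract: a clock must be prepared (a step that may sleep) before it can be enabled, so bare clk_enable()/clk_disable() pairs become clk_prepare_enable()/clk_disable_unprepare(), and the combined call can fail and must be checked, which is why probe and resume gain error handling above. A minimal sketch of the pattern:

#include <linux/clk.h>
#include <linux/device.h>

static int example_power_up(struct device *dev, struct clk *clk)
{
    int err;

    err = clk_prepare_enable(clk);    /* prepare + enable; may fail */
    if (err) {
        dev_err(dev, "clk_prepare_enable() failed: %d\n", err);
        return err;
    }
    return 0;
}

static void example_power_down(struct clk *clk)
{
    clk_disable_unprepare(clk);    /* symmetric teardown */
}
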
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c12d2618eebf..3872ab96b80a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
1152 if (skb == NULL) 1152 if (skb == NULL)
1153 break; 1153 break;
1154 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1154 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1155 if (pci_dma_mapping_error(np->pci_dev,
1156 np->rx_info[i].mapping)) {
1157 dev_kfree_skb(skb);
1158 np->rx_info[i].skb = NULL;
1159 break;
1160 }
1155 /* Grrr, we cannot offset to correctly align the IP header. */ 1161 /* Grrr, we cannot offset to correctly align the IP header. */
1156 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); 1162 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1157 } 1163 }
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1182{ 1188{
1183 struct netdev_private *np = netdev_priv(dev); 1189 struct netdev_private *np = netdev_priv(dev);
1184 unsigned int entry; 1190 unsigned int entry;
1191 unsigned int prev_tx;
1185 u32 status; 1192 u32 status;
1186 int i; 1193 int i, j;
1187 1194
1188 /* 1195 /*
1189 * be cautious here, wrapping the queue has weird semantics 1196 * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1201 } 1208 }
1202#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ 1209#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1203 1210
1211 prev_tx = np->cur_tx;
1204 entry = np->cur_tx % TX_RING_SIZE; 1212 entry = np->cur_tx % TX_RING_SIZE;
1205 for (i = 0; i < skb_num_frags(skb); i++) { 1213 for (i = 0; i < skb_num_frags(skb); i++) {
1206 int wrap_ring = 0; 1214 int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1234 skb_frag_size(this_frag), 1242 skb_frag_size(this_frag),
1235 PCI_DMA_TODEVICE); 1243 PCI_DMA_TODEVICE);
1236 } 1244 }
1245 if (pci_dma_mapping_error(np->pci_dev,
1246 np->tx_info[entry].mapping)) {
1247 dev->stats.tx_dropped++;
1248 goto err_out;
1249 }
1237 1250
1238 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); 1251 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1239 np->tx_ring[entry].status = cpu_to_le32(status); 1252 np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1268 netif_stop_queue(dev); 1281 netif_stop_queue(dev);
1269 1282
1270 return NETDEV_TX_OK; 1283 return NETDEV_TX_OK;
1271}
1272 1284
1285err_out:
1286 entry = prev_tx % TX_RING_SIZE;
1287 np->tx_info[entry].skb = NULL;
1288 if (i > 0) {
1289 pci_unmap_single(np->pci_dev,
1290 np->tx_info[entry].mapping,
1291 skb_first_frag_len(skb),
1292 PCI_DMA_TODEVICE);
1293 np->tx_info[entry].mapping = 0;
1294 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1295 for (j = 1; j < i; j++) {
1296 pci_unmap_single(np->pci_dev,
1297 np->tx_info[entry].mapping,
1298 skb_frag_size(
1299 &skb_shinfo(skb)->frags[j-1]),
1300 PCI_DMA_TODEVICE);
1301 entry++;
1302 }
1303 }
1304 dev_kfree_skb_any(skb);
1305 np->cur_tx = prev_tx;
1306 return NETDEV_TX_OK;
1307}
1273 1308
1274/* The interrupt handler does all of the Rx thread work and cleans up 1309/* The interrupt handler does all of the Rx thread work and cleans up
1275 after the Tx thread. */ 1310 after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
1569 break; /* Better luck next round. */ 1604 break; /* Better luck next round. */
1570 np->rx_info[entry].mapping = 1605 np->rx_info[entry].mapping =
1571 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1606 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1607 if (pci_dma_mapping_error(np->pci_dev,
1608 np->rx_info[entry].mapping)) {
1609 dev_kfree_skb(skb);
1610 np->rx_info[entry].skb = NULL;
1611 break;
1612 }
1572 np->rx_ring[entry].rxaddr = 1613 np->rx_ring[entry].rxaddr =
1573 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); 1614 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1574 } 1615 }
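
All three starfire hunks add the same missing check: a streaming DMA mapping can fail (IOMMU exhaustion, SWIOTLB pressure), so the returned handle must be vetted with pci_dma_mapping_error() before hardware sees it. On the RX paths the skb is simply dropped; on TX the fragments mapped so far are unwound and the packet is discarded rather than leaving a half-built descriptor chain. The RX-side check in isolation, with hypothetical names:

#include <linux/pci.h>
#include <linux/skbuff.h>

static int example_map_rx(struct pci_dev *pdev, struct sk_buff *skb,
                          unsigned int buf_sz, dma_addr_t *mapping)
{
    *mapping = pci_map_single(pdev, skb->data, buf_sz,
                              PCI_DMA_FROMDEVICE);
    if (pci_dma_mapping_error(pdev, *mapping)) {
        dev_kfree_skb(skb);    /* never hand a bad address to the NIC */
        return -ENOMEM;
    }
    return 0;
}
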
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 5b7ba25e0065..8a280e7d66bd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -891,6 +891,8 @@
891#define PCS_V1_WINDOW_SELECT 0x03fc 891#define PCS_V1_WINDOW_SELECT 0x03fc
892#define PCS_V2_WINDOW_DEF 0x9060 892#define PCS_V2_WINDOW_DEF 0x9060
893#define PCS_V2_WINDOW_SELECT 0x9064 893#define PCS_V2_WINDOW_SELECT 0x9064
894#define PCS_V2_RV_WINDOW_DEF 0x1060
895#define PCS_V2_RV_WINDOW_SELECT 0x1064
894 896
895/* PCS register entry bit positions and sizes */ 897/* PCS register entry bit positions and sizes */
896#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 898#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index aaf0350076a9..a7d16db5c4b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1151 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1151 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1152 1152
1153 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1153 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1154 XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); 1154 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1155 mmd_data = XPCS16_IOREAD(pdata, offset); 1155 mmd_data = XPCS16_IOREAD(pdata, offset);
1156 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1156 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1157 1157
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1183 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1183 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1184 1184
1185 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1185 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1186 XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); 1186 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1187 XPCS16_IOWRITE(pdata, offset, mmd_data); 1187 XPCS16_IOWRITE(pdata, offset, mmd_data);
1188 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1188 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1189} 1189}
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
3407 3407
3408 /* Flush Tx queues */ 3408 /* Flush Tx queues */
3409 ret = xgbe_flush_tx_queues(pdata); 3409 ret = xgbe_flush_tx_queues(pdata);
3410 if (ret) 3410 if (ret) {
3411 netdev_err(pdata->netdev, "error flushing TX queues\n");
3411 return ret; 3412 return ret;
3413 }
3412 3414
3413 /* 3415 /*
3414 * Initialize DMA related features 3416 * Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 9943629fcbf9..1c87cc204075 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
1070 1070
1071 DBGPR("-->xgbe_start\n"); 1071 DBGPR("-->xgbe_start\n");
1072 1072
1073 hw_if->init(pdata); 1073 ret = hw_if->init(pdata);
1074 if (ret)
1075 return ret;
1074 1076
1075 xgbe_napi_enable(pdata, 1); 1077 xgbe_napi_enable(pdata, 1);
1076 1078
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index e76b7f65b805..c2730f15bd8b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
265 struct xgbe_prv_data *pdata; 265 struct xgbe_prv_data *pdata;
266 struct device *dev = &pdev->dev; 266 struct device *dev = &pdev->dev;
267 void __iomem * const *iomap_table; 267 void __iomem * const *iomap_table;
268 struct pci_dev *rdev;
268 unsigned int ma_lo, ma_hi; 269 unsigned int ma_lo, ma_hi;
269 unsigned int reg; 270 unsigned int reg;
270 int bar_mask; 271 int bar_mask;
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
326 if (netif_msg_probe(pdata)) 327 if (netif_msg_probe(pdata))
327 dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); 328 dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
328 329
330 /* Set the PCS indirect addressing definition registers */
331 rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
332 if (rdev &&
333 (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
334 pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
335 pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
336 } else {
337 pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
338 pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
339 }
340 pci_dev_put(rdev);
341
329 /* Configure the PCS indirect addressing support */ 342 /* Configure the PCS indirect addressing support */
330 reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF); 343 reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
331 pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); 344 pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
332 pdata->xpcs_window <<= 6; 345 pdata->xpcs_window <<= 6;
333 pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); 346 pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f52a9bd05bac..00108815b55e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -955,6 +955,8 @@ struct xgbe_prv_data {
955 955
956 /* XPCS indirect addressing lock */ 956 /* XPCS indirect addressing lock */
957 spinlock_t xpcs_lock; 957 spinlock_t xpcs_lock;
958 unsigned int xpcs_window_def_reg;
959 unsigned int xpcs_window_sel_reg;
958 unsigned int xpcs_window; 960 unsigned int xpcs_window;
959 unsigned int xpcs_window_size; 961 unsigned int xpcs_window_size;
960 unsigned int xpcs_window_mask; 962 unsigned int xpcs_window_mask;
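
Taken together, the xgbe hunks parameterize the PCS indirect window. MMD registers are reached by writing a window-select register and then accessing an offset inside the mapped window; on the Raven integrated parts (root complex 1022:15d0) the window registers live at 0x1060/0x1064 rather than 0x9060/0x9064, so the addresses are chosen at probe time and stored in pdata. A schematic of a windowed read under the lock (the exact index/offset split shown is illustrative):

#include <linux/io.h>
#include <linux/spinlock.h>

struct example_pdata {
    void __iomem *xpcs_regs;
    spinlock_t xpcs_lock;
    unsigned int xpcs_window_sel_reg;    /* per-device, set at probe */
    unsigned int xpcs_window;
    unsigned int xpcs_window_mask;
};

static u16 example_read_mmd(struct example_pdata *pdata,
                            unsigned int mmd_address)
{
    unsigned int index, offset;
    unsigned long flags;
    u16 val;

    index = mmd_address & ~pdata->xpcs_window_mask;
    offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

    spin_lock_irqsave(&pdata->xpcs_lock, flags);
    writel(index, pdata->xpcs_regs + pdata->xpcs_window_sel_reg);
    val = readw(pdata->xpcs_regs + offset);
    spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
    return val;
}
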
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c8f525574d68..7dcc907a449d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
685 return -ENOMEM; 685 return -ENOMEM;
686 } 686 }
687 687
688 alx_reinit_rings(alx);
689
690 return 0; 688 return 0;
691} 689}
692 690
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
703 if (alx->qnapi[0] && alx->qnapi[0]->rxq) 701 if (alx->qnapi[0] && alx->qnapi[0]->rxq)
704 kfree(alx->qnapi[0]->rxq->bufs); 702 kfree(alx->qnapi[0]->rxq->bufs);
705 703
706 if (!alx->descmem.virt) 704 if (alx->descmem.virt)
707 dma_free_coherent(&alx->hw.pdev->dev, 705 dma_free_coherent(&alx->hw.pdev->dev,
708 alx->descmem.size, 706 alx->descmem.size,
709 alx->descmem.virt, 707 alx->descmem.virt,
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
984 alx_free_rings(alx); 982 alx_free_rings(alx);
985 alx_free_napis(alx); 983 alx_free_napis(alx);
986 alx_disable_advanced_intr(alx); 984 alx_disable_advanced_intr(alx);
985 alx_init_intr(alx, false);
987 986
988 err = alx_alloc_napis(alx); 987 err = alx_alloc_napis(alx);
989 if (err) 988 if (err)
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
1241 if (err) 1240 if (err)
1242 goto out_free_rings; 1241 goto out_free_rings;
1243 1242
 1243 /* must be called after alx_request_irq because the chip stops working
 1244 * if we copy the DMA addresses in alx_init_ring_ptrs twice after a
 1245 * failed MSI-X interrupt request
1246 */
1247 alx_reinit_rings(alx);
1248
1244 netif_set_real_num_tx_queues(alx->dev, alx->num_txq); 1249 netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
1245 netif_set_real_num_rx_queues(alx->dev, alx->num_rxq); 1250 netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
1246 1251
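
Two alx fixes travel together here. alx_reinit_rings() moves out of alx_alloc_rings() and into __alx_open() after IRQ setup (the added comment explains why), and the descriptor-memory release in alx_free_rings() had its test inverted: "if (!alx->descmem.virt)" freed the buffer exactly when it did not exist and leaked it whenever it did. The corrected conditional free, schematically:

#include <linux/dma-mapping.h>

struct example_mem {
    void *virt;
    dma_addr_t dma;
    size_t size;
};

static void example_free_rings(struct device *dev, struct example_mem *mem)
{
    if (mem->virt)    /* free only what was actually allocated */
        dma_free_coherent(dev, mem->size, mem->virt, mem->dma);
    mem->virt = NULL;
}
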
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3b14d5144228..c483618b57bd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
913 priv->old_link = 0; 913 priv->old_link = 0;
914 priv->old_duplex = -1; 914 priv->old_duplex = -1;
915 priv->old_pause = -1; 915 priv->old_pause = -1;
916 } else {
917 phydev = NULL;
916 } 918 }
917 919
918 /* mask all interrupts and request them */ 920 /* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
1083 enet_dmac_writel(priv, priv->dma_chan_int_mask, 1085 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1084 ENETDMAC_IRMASK, priv->tx_chan); 1086 ENETDMAC_IRMASK, priv->tx_chan);
1085 1087
1086 if (priv->has_phy) 1088 if (phydev)
1087 phy_start(phydev); 1089 phy_start(phydev);
1088 else 1090 else
1089 bcm_enet_adjust_link(dev); 1091 bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@ out_freeirq:
1126 free_irq(dev->irq, dev); 1128 free_irq(dev->irq, dev);
1127 1129
1128out_phy_disconnect: 1130out_phy_disconnect:
1129 if (priv->has_phy) 1131 if (phydev)
1130 phy_disconnect(phydev); 1132 phy_disconnect(phydev);
1131 1133
1132 return ret; 1134 return ret;
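
The bcm_enet_open() fix keys every later PHY call off the phydev pointer itself: the local pointer is now explicitly NULLed on the no-PHY path, so phy_start() and the phy_disconnect() error path cannot act on a stale or never-assigned pointer when priv->has_phy and the actual connection state disagree. The shape of the guard, schematically:

#include <linux/phy.h>

static int example_open(struct phy_device *phydev, bool has_phy)
{
    if (!has_phy)
        phydev = NULL;    /* make the no-PHY case explicit */

    /* ... request IRQs, set up DMA ... */

    if (phydev)           /* guard on the pointer, not the flag */
        phy_start(phydev);
    return 0;
}
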
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9608cb49a11c..4fcc6a84a087 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1099,7 +1099,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1099{ 1099{
1100#ifdef CONFIG_INET 1100#ifdef CONFIG_INET
1101 struct tcphdr *th; 1101 struct tcphdr *th;
1102 int len, nw_off, tcp_opt_len; 1102 int len, nw_off, tcp_opt_len = 0;
1103 1103
1104 if (tcp_ts) 1104 if (tcp_ts)
1105 tcp_opt_len = 12; 1105 tcp_opt_len = 12;
@@ -5314,17 +5314,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5314 if ((link_info->support_auto_speeds | diff) != 5314 if ((link_info->support_auto_speeds | diff) !=
5315 link_info->support_auto_speeds) { 5315 link_info->support_auto_speeds) {
5316 /* An advertised speed is no longer supported, so we need to 5316 /* An advertised speed is no longer supported, so we need to
5317 * update the advertisement settings. See bnxt_reset() for 5317 * update the advertisement settings. Caller holds RTNL
5318 * comments about the rtnl_lock() sequence below. 5318 * so we can modify link settings.
5319 */ 5319 */
5320 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5321 rtnl_lock();
5322 link_info->advertising = link_info->support_auto_speeds; 5320 link_info->advertising = link_info->support_auto_speeds;
5323 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 5321 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5324 (link_info->autoneg & BNXT_AUTONEG_SPEED))
5325 bnxt_hwrm_set_link_setting(bp, true, false); 5322 bnxt_hwrm_set_link_setting(bp, true, false);
5326 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5327 rtnl_unlock();
5328 } 5323 }
5329 return 0; 5324 return 0;
5330} 5325}
@@ -6200,29 +6195,37 @@ bnxt_restart_timer:
6200 mod_timer(&bp->timer, jiffies + bp->current_interval); 6195 mod_timer(&bp->timer, jiffies + bp->current_interval);
6201} 6196}
6202 6197
6203/* Only called from bnxt_sp_task() */ 6198static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6204static void bnxt_reset(struct bnxt *bp, bool silent)
6205{ 6199{
6206 /* bnxt_reset_task() calls bnxt_close_nic() which waits 6200 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
6207 * for BNXT_STATE_IN_SP_TASK to clear. 6201 * set. If the device is being closed, bnxt_close() may be holding
6208 * If there is a parallel dev_close(), bnxt_close() may be holding
6209 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 6202 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
6210 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 6203 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
6211 */ 6204 */
6212 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6205 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6213 rtnl_lock(); 6206 rtnl_lock();
6214 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 6207}
6215 bnxt_reset_task(bp, silent); 6208
6209static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
6210{
6216 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6211 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6217 rtnl_unlock(); 6212 rtnl_unlock();
6218} 6213}
6219 6214
6215/* Only called from bnxt_sp_task() */
6216static void bnxt_reset(struct bnxt *bp, bool silent)
6217{
6218 bnxt_rtnl_lock_sp(bp);
6219 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6220 bnxt_reset_task(bp, silent);
6221 bnxt_rtnl_unlock_sp(bp);
6222}
6223
6220static void bnxt_cfg_ntp_filters(struct bnxt *); 6224static void bnxt_cfg_ntp_filters(struct bnxt *);
6221 6225
6222static void bnxt_sp_task(struct work_struct *work) 6226static void bnxt_sp_task(struct work_struct *work)
6223{ 6227{
6224 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 6228 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
6225 int rc;
6226 6229
6227 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6230 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6228 smp_mb__after_atomic(); 6231 smp_mb__after_atomic();
@@ -6236,16 +6239,6 @@ static void bnxt_sp_task(struct work_struct *work)
6236 6239
6237 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 6240 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6238 bnxt_cfg_ntp_filters(bp); 6241 bnxt_cfg_ntp_filters(bp);
6239 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6240 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6241 &bp->sp_event))
6242 bnxt_hwrm_phy_qcaps(bp);
6243
6244 rc = bnxt_update_link(bp, true);
6245 if (rc)
6246 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6247 rc);
6248 }
6249 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 6242 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6250 bnxt_hwrm_exec_fwd_req(bp); 6243 bnxt_hwrm_exec_fwd_req(bp);
6251 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 6244 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6266,18 +6259,39 @@ static void bnxt_sp_task(struct work_struct *work)
6266 bnxt_hwrm_tunnel_dst_port_free( 6259 bnxt_hwrm_tunnel_dst_port_free(
6267 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 6260 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6268 } 6261 }
6262 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6263 bnxt_hwrm_port_qstats(bp);
6264
 6265 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
 6266 * must be the last functions called before exiting.
6267 */
6268 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6269 int rc = 0;
6270
6271 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6272 &bp->sp_event))
6273 bnxt_hwrm_phy_qcaps(bp);
6274
6275 bnxt_rtnl_lock_sp(bp);
6276 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6277 rc = bnxt_update_link(bp, true);
6278 bnxt_rtnl_unlock_sp(bp);
6279 if (rc)
6280 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6281 rc);
6282 }
6283 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
6284 bnxt_rtnl_lock_sp(bp);
6285 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6286 bnxt_get_port_module_status(bp);
6287 bnxt_rtnl_unlock_sp(bp);
6288 }
6269 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 6289 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6270 bnxt_reset(bp, false); 6290 bnxt_reset(bp, false);
6271 6291
6272 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 6292 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6273 bnxt_reset(bp, true); 6293 bnxt_reset(bp, true);
6274 6294
6275 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
6276 bnxt_get_port_module_status(bp);
6277
6278 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6279 bnxt_hwrm_port_qstats(bp);
6280
6281 smp_mb__before_atomic(); 6295 smp_mb__before_atomic();
6282 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6296 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6283} 6297}
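
The bnxt refactor addresses a lock-ordering hazard: bnxt_close() runs under RTNL and waits for BNXT_STATE_IN_SP_TASK to clear, while the service task sets that bit for its whole run, so any work item that itself needs RTNL must drop the bit before locking and restore it afterwards. The patch factors that dance into bnxt_rtnl_lock_sp()/bnxt_rtnl_unlock_sp(), moves every RTNL-taking item to the end of the task, and rechecks BNXT_STATE_OPEN under the lock. The core of the pattern, schematically:

#include <linux/bitops.h>
#include <linux/rtnetlink.h>

#define IN_SP_TASK 0    /* hypothetical state bit */

static unsigned long example_state;

static void example_rtnl_lock_sp(void)
{
    /* drop the marker first: a closer holding rtnl may be
     * waiting on this bit, and taking rtnl with it set deadlocks
     */
    clear_bit(IN_SP_TASK, &example_state);
    rtnl_lock();
}

static void example_rtnl_unlock_sp(void)
{
    set_bit(IN_SP_TASK, &example_state);
    rtnl_unlock();
}
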
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c0fb80acc2da..baba2db9d9c2 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -43,13 +43,13 @@
43#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ 43#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
44#define MIN_RX_RING_SIZE 64 44#define MIN_RX_RING_SIZE 64
45#define MAX_RX_RING_SIZE 8192 45#define MAX_RX_RING_SIZE 8192
46#define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ 46#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
47 * (bp)->rx_ring_size) 47 * (bp)->rx_ring_size)
48 48
49#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ 49#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
50#define MIN_TX_RING_SIZE 64 50#define MIN_TX_RING_SIZE 64
51#define MAX_TX_RING_SIZE 4096 51#define MAX_TX_RING_SIZE 4096
52#define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ 52#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
53 * (bp)->tx_ring_size) 53 * (bp)->tx_ring_size)
54 54
55/* level of occupied TX descriptors under which we wake up TX process */ 55/* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
78 */ 78 */
79#define MACB_HALT_TIMEOUT 1230 79#define MACB_HALT_TIMEOUT 1230
80 80
 81/* The DMA buffer descriptor size may differ
 82 * depending on the hardware configuration.
83 */
84static unsigned int macb_dma_desc_get_size(struct macb *bp)
85{
86#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
87 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
88 return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
89#endif
90 return sizeof(struct macb_dma_desc);
91}
92
93static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
94{
95#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 96 /* The DMA buffer descriptor is 4 words long (instead of 2 words)
 97 * for the 64-bit GEM.
98 */
99 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
100 idx <<= 1;
101#endif
102 return idx;
103}
104
105#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
106static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
107{
108 return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
109}
110#endif
111
81/* Ring buffer accessors */ 112/* Ring buffer accessors */
82static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) 113static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
83{ 114{
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
87static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 118static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
88 unsigned int index) 119 unsigned int index)
89{ 120{
90 return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)]; 121 index = macb_tx_ring_wrap(queue->bp, index);
122 index = macb_adj_dma_desc_idx(queue->bp, index);
123 return &queue->tx_ring[index];
91} 124}
92 125
93static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 126static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
101 dma_addr_t offset; 134 dma_addr_t offset;
102 135
103 offset = macb_tx_ring_wrap(queue->bp, index) * 136 offset = macb_tx_ring_wrap(queue->bp, index) *
104 sizeof(struct macb_dma_desc); 137 macb_dma_desc_get_size(queue->bp);
105 138
106 return queue->tx_ring_dma + offset; 139 return queue->tx_ring_dma + offset;
107} 140}
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
113 146
114static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) 147static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
115{ 148{
116 return &bp->rx_ring[macb_rx_ring_wrap(bp, index)]; 149 index = macb_rx_ring_wrap(bp, index);
150 index = macb_adj_dma_desc_idx(bp, index);
151 return &bp->rx_ring[index];
117} 152}
118 153
119static void *macb_rx_buffer(struct macb *bp, unsigned int index) 154static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
560 } 595 }
561} 596}
562 597
563static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr) 598static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
564{ 599{
565 desc->addr = (u32)addr;
566#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 600#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
567 desc->addrh = (u32)(addr >> 32); 601 struct macb_dma_desc_64 *desc_64;
602
603 if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
604 desc_64 = macb_64b_desc(bp, desc);
605 desc_64->addrh = upper_32_bits(addr);
606 }
568#endif 607#endif
608 desc->addr = lower_32_bits(addr);
609}
610
611static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
612{
613 dma_addr_t addr = 0;
614#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
615 struct macb_dma_desc_64 *desc_64;
616
617 if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
618 desc_64 = macb_64b_desc(bp, desc);
619 addr = ((u64)(desc_64->addrh) << 32);
620 }
621#endif
622 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
623 return addr;
569} 624}
570 625
571static void macb_tx_error_task(struct work_struct *work) 626static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
649 704
650 /* Set end of TX queue */ 705 /* Set end of TX queue */
651 desc = macb_tx_desc(queue, 0); 706 desc = macb_tx_desc(queue, 0);
652 macb_set_addr(desc, 0); 707 macb_set_addr(bp, desc, 0);
653 desc->ctrl = MACB_BIT(TX_USED); 708 desc->ctrl = MACB_BIT(TX_USED);
654 709
655 /* Make descriptor updates visible to hardware */ 710 /* Make descriptor updates visible to hardware */
656 wmb(); 711 wmb();
657 712
658 /* Reinitialize the TX desc queue */ 713 /* Reinitialize the TX desc queue */
659 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); 714 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
660#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 715#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
661 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); 716 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
717 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
662#endif 718#endif
663 /* Make TX ring reflect state of hardware */ 719 /* Make TX ring reflect state of hardware */
664 queue->tx_head = 0; 720 queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
750 unsigned int entry; 806 unsigned int entry;
751 struct sk_buff *skb; 807 struct sk_buff *skb;
752 dma_addr_t paddr; 808 dma_addr_t paddr;
809 struct macb_dma_desc *desc;
753 810
754 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, 811 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
755 bp->rx_ring_size) > 0) { 812 bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
759 rmb(); 816 rmb();
760 817
761 bp->rx_prepared_head++; 818 bp->rx_prepared_head++;
819 desc = macb_rx_desc(bp, entry);
762 820
763 if (!bp->rx_skbuff[entry]) { 821 if (!bp->rx_skbuff[entry]) {
764 /* allocate sk_buff for this free entry in ring */ 822 /* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
782 840
783 if (entry == bp->rx_ring_size - 1) 841 if (entry == bp->rx_ring_size - 1)
784 paddr |= MACB_BIT(RX_WRAP); 842 paddr |= MACB_BIT(RX_WRAP);
785 macb_set_addr(&(bp->rx_ring[entry]), paddr); 843 macb_set_addr(bp, desc, paddr);
786 bp->rx_ring[entry].ctrl = 0; 844 desc->ctrl = 0;
787 845
788 /* properly align Ethernet header */ 846 /* properly align Ethernet header */
789 skb_reserve(skb, NET_IP_ALIGN); 847 skb_reserve(skb, NET_IP_ALIGN);
790 } else { 848 } else {
791 bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); 849 desc->addr &= ~MACB_BIT(RX_USED);
792 bp->rx_ring[entry].ctrl = 0; 850 desc->ctrl = 0;
793 } 851 }
794 } 852 }
795 853
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
835 bool rxused; 893 bool rxused;
836 894
837 entry = macb_rx_ring_wrap(bp, bp->rx_tail); 895 entry = macb_rx_ring_wrap(bp, bp->rx_tail);
838 desc = &bp->rx_ring[entry]; 896 desc = macb_rx_desc(bp, entry);
839 897
840 /* Make hw descriptor updates visible to CPU */ 898 /* Make hw descriptor updates visible to CPU */
841 rmb(); 899 rmb();
842 900
843 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; 901 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
844 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 902 addr = macb_get_addr(bp, desc);
845#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
846 addr |= ((u64)(desc->addrh) << 32);
847#endif
848 ctrl = desc->ctrl; 903 ctrl = desc->ctrl;
849 904
850 if (!rxused) 905 if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
987static inline void macb_init_rx_ring(struct macb *bp) 1042static inline void macb_init_rx_ring(struct macb *bp)
988{ 1043{
989 dma_addr_t addr; 1044 dma_addr_t addr;
1045 struct macb_dma_desc *desc = NULL;
990 int i; 1046 int i;
991 1047
992 addr = bp->rx_buffers_dma; 1048 addr = bp->rx_buffers_dma;
993 for (i = 0; i < bp->rx_ring_size; i++) { 1049 for (i = 0; i < bp->rx_ring_size; i++) {
994 bp->rx_ring[i].addr = addr; 1050 desc = macb_rx_desc(bp, i);
995 bp->rx_ring[i].ctrl = 0; 1051 macb_set_addr(bp, desc, addr);
1052 desc->ctrl = 0;
996 addr += bp->rx_buffer_size; 1053 addr += bp->rx_buffer_size;
997 } 1054 }
998 bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP); 1055 desc->addr |= MACB_BIT(RX_WRAP);
999 bp->rx_tail = 0; 1056 bp->rx_tail = 0;
1000} 1057}
1001 1058
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
1008 1065
1009 for (tail = bp->rx_tail; budget > 0; tail++) { 1066 for (tail = bp->rx_tail; budget > 0; tail++) {
1010 struct macb_dma_desc *desc = macb_rx_desc(bp, tail); 1067 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
1011 u32 addr, ctrl; 1068 u32 ctrl;
1012 1069
1013 /* Make hw descriptor updates visible to CPU */ 1070 /* Make hw descriptor updates visible to CPU */
1014 rmb(); 1071 rmb();
1015 1072
1016 addr = desc->addr;
1017 ctrl = desc->ctrl; 1073 ctrl = desc->ctrl;
1018 1074
1019 if (!(addr & MACB_BIT(RX_USED))) 1075 if (!(desc->addr & MACB_BIT(RX_USED)))
1020 break; 1076 break;
1021 1077
1022 if (ctrl & MACB_BIT(RX_SOF)) { 1078 if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1336 i = tx_head; 1392 i = tx_head;
1337 entry = macb_tx_ring_wrap(bp, i); 1393 entry = macb_tx_ring_wrap(bp, i);
1338 ctrl = MACB_BIT(TX_USED); 1394 ctrl = MACB_BIT(TX_USED);
1339 desc = &queue->tx_ring[entry]; 1395 desc = macb_tx_desc(queue, entry);
1340 desc->ctrl = ctrl; 1396 desc->ctrl = ctrl;
1341 1397
1342 if (lso_ctrl) { 1398 if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1358 i--; 1414 i--;
1359 entry = macb_tx_ring_wrap(bp, i); 1415 entry = macb_tx_ring_wrap(bp, i);
1360 tx_skb = &queue->tx_skb[entry]; 1416 tx_skb = &queue->tx_skb[entry];
1361 desc = &queue->tx_ring[entry]; 1417 desc = macb_tx_desc(queue, entry);
1362 1418
1363 ctrl = (u32)tx_skb->size; 1419 ctrl = (u32)tx_skb->size;
1364 if (eof) { 1420 if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1379 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1435 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1380 1436
1381 /* Set TX buffer descriptor */ 1437 /* Set TX buffer descriptor */
1382 macb_set_addr(desc, tx_skb->mapping); 1438 macb_set_addr(bp, desc, tx_skb->mapping);
1383 /* desc->addr must be visible to hardware before clearing 1439 /* desc->addr must be visible to hardware before clearing
1384 * 'TX_USED' bit in desc->ctrl. 1440 * 'TX_USED' bit in desc->ctrl.
1385 */ 1441 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
1586 if (!skb) 1642 if (!skb)
1587 continue; 1643 continue;
1588 1644
1589 desc = &bp->rx_ring[i]; 1645 desc = macb_rx_desc(bp, i);
1590 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1646 addr = macb_get_addr(bp, desc);
1591#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1647
1592 addr |= ((u64)(desc->addrh) << 32);
1593#endif
1594 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1648 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1595 DMA_FROM_DEVICE); 1649 DMA_FROM_DEVICE);
1596 dev_kfree_skb_any(skb); 1650 dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
1711static void gem_init_rings(struct macb *bp) 1765static void gem_init_rings(struct macb *bp)
1712{ 1766{
1713 struct macb_queue *queue; 1767 struct macb_queue *queue;
1768 struct macb_dma_desc *desc = NULL;
1714 unsigned int q; 1769 unsigned int q;
1715 int i; 1770 int i;
1716 1771
1717 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1772 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1718 for (i = 0; i < bp->tx_ring_size; i++) { 1773 for (i = 0; i < bp->tx_ring_size; i++) {
1719 queue->tx_ring[i].addr = 0; 1774 desc = macb_tx_desc(queue, i);
1720 queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); 1775 macb_set_addr(bp, desc, 0);
1776 desc->ctrl = MACB_BIT(TX_USED);
1721 } 1777 }
1722 queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); 1778 desc->ctrl |= MACB_BIT(TX_WRAP);
1723 queue->tx_head = 0; 1779 queue->tx_head = 0;
1724 queue->tx_tail = 0; 1780 queue->tx_tail = 0;
1725 } 1781 }
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
1733static void macb_init_rings(struct macb *bp) 1789static void macb_init_rings(struct macb *bp)
1734{ 1790{
1735 int i; 1791 int i;
1792 struct macb_dma_desc *desc = NULL;
1736 1793
1737 macb_init_rx_ring(bp); 1794 macb_init_rx_ring(bp);
1738 1795
1739 for (i = 0; i < bp->tx_ring_size; i++) { 1796 for (i = 0; i < bp->tx_ring_size; i++) {
1740 bp->queues[0].tx_ring[i].addr = 0; 1797 desc = macb_tx_desc(&bp->queues[0], i);
1741 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); 1798 macb_set_addr(bp, desc, 0);
1799 desc->ctrl = MACB_BIT(TX_USED);
1742 } 1800 }
1743 bp->queues[0].tx_head = 0; 1801 bp->queues[0].tx_head = 0;
1744 bp->queues[0].tx_tail = 0; 1802 bp->queues[0].tx_tail = 0;
1745 bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); 1803 desc->ctrl |= MACB_BIT(TX_WRAP);
1746} 1804}
1747 1805
1748static void macb_reset_hw(struct macb *bp) 1806static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
1863 dmacfg &= ~GEM_BIT(TXCOEN); 1921 dmacfg &= ~GEM_BIT(TXCOEN);
1864 1922
1865#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1923#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1866 dmacfg |= GEM_BIT(ADDR64); 1924 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1925 dmacfg |= GEM_BIT(ADDR64);
1867#endif 1926#endif
1868 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 1927 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1869 dmacfg); 1928 dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
1910 macb_configure_dma(bp); 1969 macb_configure_dma(bp);
1911 1970
1912 /* Initialize TX and RX buffers */ 1971 /* Initialize TX and RX buffers */
1913 macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma)); 1972 macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
1914#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1973#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1915 macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32)); 1974 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1975 macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
1916#endif 1976#endif
1917 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1977 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1918 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); 1978 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1919#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1979#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1920 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); 1980 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1981 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
1921#endif 1982#endif
1922 1983
1923 /* Enable interrupts */ 1984 /* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
2627 queue->IMR = GEM_IMR(hw_q - 1); 2688 queue->IMR = GEM_IMR(hw_q - 1);
2628 queue->TBQP = GEM_TBQP(hw_q - 1); 2689 queue->TBQP = GEM_TBQP(hw_q - 1);
2629#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2690#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2630 queue->TBQPH = GEM_TBQPH(hw_q -1); 2691 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
2692 queue->TBQPH = GEM_TBQPH(hw_q - 1);
2631#endif 2693#endif
2632 } else { 2694 } else {
2633 /* queue0 uses legacy registers */ 2695 /* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
2637 queue->IMR = MACB_IMR; 2699 queue->IMR = MACB_IMR;
2638 queue->TBQP = MACB_TBQP; 2700 queue->TBQP = MACB_TBQP;
2639#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2701#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2640 queue->TBQPH = MACB_TBQPH; 2702 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
2703 queue->TBQPH = MACB_TBQPH;
2641#endif 2704#endif
2642 } 2705 }
2643 2706
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
2730static int at91ether_start(struct net_device *dev) 2793static int at91ether_start(struct net_device *dev)
2731{ 2794{
2732 struct macb *lp = netdev_priv(dev); 2795 struct macb *lp = netdev_priv(dev);
2796 struct macb_dma_desc *desc;
2733 dma_addr_t addr; 2797 dma_addr_t addr;
2734 u32 ctl; 2798 u32 ctl;
2735 int i; 2799 int i;
2736 2800
2737 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 2801 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2738 (AT91ETHER_MAX_RX_DESCR * 2802 (AT91ETHER_MAX_RX_DESCR *
2739 sizeof(struct macb_dma_desc)), 2803 macb_dma_desc_get_size(lp)),
2740 &lp->rx_ring_dma, GFP_KERNEL); 2804 &lp->rx_ring_dma, GFP_KERNEL);
2741 if (!lp->rx_ring) 2805 if (!lp->rx_ring)
2742 return -ENOMEM; 2806 return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
2748 if (!lp->rx_buffers) { 2812 if (!lp->rx_buffers) {
2749 dma_free_coherent(&lp->pdev->dev, 2813 dma_free_coherent(&lp->pdev->dev,
2750 AT91ETHER_MAX_RX_DESCR * 2814 AT91ETHER_MAX_RX_DESCR *
2751 sizeof(struct macb_dma_desc), 2815 macb_dma_desc_get_size(lp),
2752 lp->rx_ring, lp->rx_ring_dma); 2816 lp->rx_ring, lp->rx_ring_dma);
2753 lp->rx_ring = NULL; 2817 lp->rx_ring = NULL;
2754 return -ENOMEM; 2818 return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
2756 2820
2757 addr = lp->rx_buffers_dma; 2821 addr = lp->rx_buffers_dma;
2758 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 2822 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2759 lp->rx_ring[i].addr = addr; 2823 desc = macb_rx_desc(lp, i);
2760 lp->rx_ring[i].ctrl = 0; 2824 macb_set_addr(lp, desc, addr);
2825 desc->ctrl = 0;
2761 addr += AT91ETHER_MAX_RBUFF_SZ; 2826 addr += AT91ETHER_MAX_RBUFF_SZ;
2762 } 2827 }
2763 2828
2764 /* Set the Wrap bit on the last descriptor */ 2829 /* Set the Wrap bit on the last descriptor */
2765 lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); 2830 desc->addr |= MACB_BIT(RX_WRAP);
2766 2831
2767 /* Reset buffer index */ 2832 /* Reset buffer index */
2768 lp->rx_tail = 0; 2833 lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
2834 2899
2835 dma_free_coherent(&lp->pdev->dev, 2900 dma_free_coherent(&lp->pdev->dev,
2836 AT91ETHER_MAX_RX_DESCR * 2901 AT91ETHER_MAX_RX_DESCR *
2837 sizeof(struct macb_dma_desc), 2902 macb_dma_desc_get_size(lp),
2838 lp->rx_ring, lp->rx_ring_dma); 2903 lp->rx_ring, lp->rx_ring_dma);
2839 lp->rx_ring = NULL; 2904 lp->rx_ring = NULL;
2840 2905
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2885static void at91ether_rx(struct net_device *dev) 2950static void at91ether_rx(struct net_device *dev)
2886{ 2951{
2887 struct macb *lp = netdev_priv(dev); 2952 struct macb *lp = netdev_priv(dev);
2953 struct macb_dma_desc *desc;
2888 unsigned char *p_recv; 2954 unsigned char *p_recv;
2889 struct sk_buff *skb; 2955 struct sk_buff *skb;
2890 unsigned int pktlen; 2956 unsigned int pktlen;
2891 2957
2892 while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { 2958 desc = macb_rx_desc(lp, lp->rx_tail);
2959 while (desc->addr & MACB_BIT(RX_USED)) {
2893 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 2960 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2894 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); 2961 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
2895 skb = netdev_alloc_skb(dev, pktlen + 2); 2962 skb = netdev_alloc_skb(dev, pktlen + 2);
2896 if (skb) { 2963 if (skb) {
2897 skb_reserve(skb, 2); 2964 skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
2905 lp->stats.rx_dropped++; 2972 lp->stats.rx_dropped++;
2906 } 2973 }
2907 2974
2908 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) 2975 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
2909 lp->stats.multicast++; 2976 lp->stats.multicast++;
2910 2977
2911 /* reset ownership bit */ 2978 /* reset ownership bit */
2912 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); 2979 desc->addr &= ~MACB_BIT(RX_USED);
2913 2980
2914 /* wrap after last buffer */ 2981 /* wrap after last buffer */
2915 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 2982 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2916 lp->rx_tail = 0; 2983 lp->rx_tail = 0;
2917 else 2984 else
2918 lp->rx_tail++; 2985 lp->rx_tail++;
2986
2987 desc = macb_rx_desc(lp, lp->rx_tail);
2919 } 2988 }
2920} 2989}
2921 2990
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
3211 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 3280 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
3212 3281
3213#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3282#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3214 if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32) 3283 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
3215 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); 3284 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
3285 bp->hw_dma_cap = HW_DMA_CAP_64B;
3286 } else
3287 bp->hw_dma_cap = HW_DMA_CAP_32B;
3216#endif 3288#endif
3217 3289
3218 spin_lock_init(&bp->lock); 3290 spin_lock_init(&bp->lock);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d67adad67be1..fc8550a5d47f 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -385,6 +385,8 @@
385/* Bitfields in DCFG6. */ 385/* Bitfields in DCFG6. */
386#define GEM_PBUF_LSO_OFFSET 27 386#define GEM_PBUF_LSO_OFFSET 27
387#define GEM_PBUF_LSO_SIZE 1 387#define GEM_PBUF_LSO_SIZE 1
388#define GEM_DAW64_OFFSET 23
389#define GEM_DAW64_SIZE 1
388 390
389/* Constants for CLK */ 391/* Constants for CLK */
390#define MACB_CLK_DIV8 0 392#define MACB_CLK_DIV8 0
@@ -487,12 +489,20 @@
487struct macb_dma_desc { 489struct macb_dma_desc {
488 u32 addr; 490 u32 addr;
489 u32 ctrl; 491 u32 ctrl;
492};
493
490#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 494#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
491 u32 addrh; 495enum macb_hw_dma_cap {
492 u32 resvd; 496 HW_DMA_CAP_32B,
493#endif 497 HW_DMA_CAP_64B,
494}; 498};
495 499
500struct macb_dma_desc_64 {
501 u32 addrh;
502 u32 resvd;
503};
504#endif
505
496/* DMA descriptor bitfields */ 506/* DMA descriptor bitfields */
497#define MACB_RX_USED_OFFSET 0 507#define MACB_RX_USED_OFFSET 0
498#define MACB_RX_USED_SIZE 1 508#define MACB_RX_USED_SIZE 1
@@ -874,6 +884,10 @@ struct macb {
874 unsigned int jumbo_max_len; 884 unsigned int jumbo_max_len;
875 885
876 u32 wol; 886 u32 wol;
887
888#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
889 enum macb_hw_dma_cap hw_dma_cap;
890#endif
877}; 891};
878 892
879static inline bool macb_is_gem(struct macb *bp) 893static inline bool macb_is_gem(struct macb *bp)
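
The macb series makes the descriptor geometry dynamic: GEM hardware with the 64-bit address extension appends two extra words (addrh/resvd) to each descriptor, so ring-size math and every index now go through helpers keyed on the probed hw_dma_cap, and raw bp->rx_ring[entry] accesses become macb_rx_desc()/macb_tx_desc() calls. The two helpers reduce to this shape (types hypothetical):

#include <linux/types.h>

struct desc32 { u32 addr; u32 ctrl; };
struct desc64 { u32 addrh; u32 resvd; };    /* trails desc32 on 64-bit HW */

static inline size_t example_desc_size(bool cap_64b)
{
    return sizeof(struct desc32) + (cap_64b ? sizeof(struct desc64) : 0);
}

/* one logical slot spans two 2-word elements on 64-bit-capable
 * hardware, so the raw array index doubles there
 */
static inline unsigned int example_adj_idx(bool cap_64b, unsigned int idx)
{
    return cap_64b ? idx << 1 : idx;
}
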
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 67befedef709..578c7f8f11bf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
116 int speed = 2; 116 int speed = 2;
117 117
118 if (!xcv) { 118 if (!xcv) {
119 dev_err(&xcv->pdev->dev, 119 pr_err("XCV init not done, probe may have failed\n");
120 "XCV init not done, probe may have failed\n");
121 return; 120 return;
122 } 121 }
123 122
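
The thunder_xcv fix removes a self-defeating diagnostic: the dev_err() inside "if (!xcv)" dereferenced xcv->pdev, the very pointer just found to be NULL, so reporting the error would itself oops; pr_err() needs no device argument. The anti-pattern and its repair, schematically:

#include <linux/device.h>
#include <linux/printk.h>

struct example_ctx {
    struct device *dev;
};

static void example_report(struct example_ctx *ctx)
{
    if (!ctx) {
        /* dev_err(ctx->dev, ...) would dereference the NULL
         * pointer being reported; use the device-less printk
         */
        pr_err("context not initialised, probe may have failed\n");
        return;
    }
    dev_info(ctx->dev, "context ok\n");
}
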
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1a7f8ad7b9c6..cd49a54c538d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
362 status = -EPERM; 362 status = -EPERM;
363 goto err; 363 goto err;
364 } 364 }
365done: 365
366 /* Remember currently programmed MAC */
366 ether_addr_copy(adapter->dev_mac, addr->sa_data); 367 ether_addr_copy(adapter->dev_mac, addr->sa_data);
368done:
367 ether_addr_copy(netdev->dev_addr, addr->sa_data); 369 ether_addr_copy(netdev->dev_addr, addr->sa_data);
368 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); 370 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
369 return 0; 371 return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
3618{ 3620{
3619 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ 3621 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3620 if (!BEx_chip(adapter) || !be_virtfn(adapter) || 3622 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3621 check_privilege(adapter, BE_PRIV_FILTMGMT)) 3623 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
3622 be_dev_mac_del(adapter, adapter->pmac_id[0]); 3624 be_dev_mac_del(adapter, adapter->pmac_id[0]);
3625 eth_zero_addr(adapter->dev_mac);
3626 }
3623 3627
3624 be_clear_uc_list(adapter); 3628 be_clear_uc_list(adapter);
3625 be_clear_mc_list(adapter); 3629 be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
3773 if (status) 3777 if (status)
3774 return status; 3778 return status;
3775 3779
 3776 /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ 3780 /* Normally this condition is true, as ->dev_mac is zeroed.
 3777 if (!BEx_chip(adapter) || !be_virtfn(adapter) || 3781 * But on BE3 VFs the initial MAC is pre-programmed by the PF and
 3778 check_privilege(adapter, BE_PRIV_FILTMGMT)) { 3782 * a subsequent be_dev_mac_add() can fail (after a fresh boot)
3783 */
3784 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3785 int old_pmac_id = -1;
3786
3787 /* Remember old programmed MAC if any - can happen on BE3 VF */
3788 if (!is_zero_ether_addr(adapter->dev_mac))
3789 old_pmac_id = adapter->pmac_id[0];
3790
3779 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); 3791 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
3780 if (status) 3792 if (status)
3781 return status; 3793 return status;
3794
3795 /* Delete the old programmed MAC as we successfully programmed
3796 * a new MAC
3797 */
3798 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3799 be_dev_mac_del(adapter, old_pmac_id);
3800
3782 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr); 3801 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
3783 } 3802 }
3784 3803
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
4552 4571
4553 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 4572 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4554 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 4573 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4574
4575 /* Initial MAC for BE3 VFs is already programmed by PF */
4576 if (BEx_chip(adapter) && be_virtfn(adapter))
4577 memcpy(adapter->dev_mac, mac, ETH_ALEN);
4555 } 4578 }
4556 4579
4557 return 0; 4580 return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a6e7afa878be..957bfc220978 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
2010 if (!rxb->page) 2010 if (!rxb->page)
2011 continue; 2011 continue;
2012 2012
2013 dma_unmap_single(rx_queue->dev, rxb->dma, 2013 dma_unmap_page(rx_queue->dev, rxb->dma,
2014 PAGE_SIZE, DMA_FROM_DEVICE); 2014 PAGE_SIZE, DMA_FROM_DEVICE);
2015 __free_page(rxb->page); 2015 __free_page(rxb->page);
2016 2016
2017 rxb->page = NULL; 2017 rxb->page = NULL;
@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2948 } 2948 }
2949 2949
2950 /* try reuse page */ 2950 /* try reuse page */
2951 if (unlikely(page_count(page) != 1)) 2951 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2952 return false; 2952 return false;
2953 2953
2954 /* change offset to the other half */ 2954 /* change offset to the other half */
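
Two gianfar fixes: RX buffers are created with dma_map_page(), so they must be released with the matching dma_unmap_page() (mixing the single/page variants confuses DMA-API debugging and bounce-buffer handling), and a page drawn from the pfmemalloc emergency reserve must never be recycled into the RX ring, since those pages are earmarked for memory-reclaim traffic. Schematically:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static void example_free_rx(struct device *dev, struct page *page,
                            dma_addr_t dma)
{
    /* unmap variant must match the dma_map_page() that created it */
    dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
    __free_page(page);
}

static bool example_can_reuse(struct page *page)
{
    /* sole owner, and not from the emergency reserves */
    return page_count(page) == 1 && !page_is_pfmemalloc(page);
}
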
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a831f947ca8c..309f5c66083c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1601,8 +1601,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1601 netdev->netdev_ops = &ibmveth_netdev_ops; 1601 netdev->netdev_ops = &ibmveth_netdev_ops;
1602 netdev->ethtool_ops = &netdev_ethtool_ops; 1602 netdev->ethtool_ops = &netdev_ethtool_ops;
1603 SET_NETDEV_DEV(netdev, &dev->dev); 1603 SET_NETDEV_DEV(netdev, &dev->dev);
1604 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 1604 netdev->hw_features = NETIF_F_SG;
1605 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1605 if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
1606 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1607 NETIF_F_RXCSUM;
1608 }
1606 1609
1607 netdev->features |= netdev->hw_features; 1610 netdev->features |= netdev->hw_features;
1608 1611
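
The ibmveth change stops advertising checksum offload unconditionally: NETIF_F_SG stays as the baseline, and the checksum bits are claimed only when the firmware exposes the "ibm,illan-options" attribute, since platforms without it do not honour the offload. Gating feature flags on a probe-time capability test looks like this, schematically:

#include <linux/netdevice.h>

static void example_set_features(struct net_device *netdev, bool fw_csum)
{
    netdev->hw_features = NETIF_F_SG;
    if (fw_csum)
        netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                               NETIF_F_RXCSUM;
    netdev->features |= netdev->hw_features;
}
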
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3dd87889e67e..1c29c86f8709 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2517,7 +2517,7 @@ static int mtk_remove(struct platform_device *pdev)
2517} 2517}
2518 2518
2519const struct of_device_id of_mtk_match[] = { 2519const struct of_device_id of_mtk_match[] = {
2520 { .compatible = "mediatek,mt7623-eth" }, 2520 { .compatible = "mediatek,mt2701-eth" },
2521 {}, 2521 {},
2522}; 2522};
2523MODULE_DEVICE_TABLE(of, of_mtk_match); 2523MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e939945259..53daa6ca5d83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
158 return -ETIMEDOUT; 158 return -ETIMEDOUT;
159} 159}
160 160
161static int mlx4_comm_internal_err(u32 slave_read) 161int mlx4_comm_internal_err(u32 slave_read)
162{ 162{
163 return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == 163 return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
164 (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; 164 (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index d9c9f86a30df..d5a9372ed84d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1732,8 +1732,6 @@ static void mlx4_en_get_channels(struct net_device *dev,
1732{ 1732{
1733 struct mlx4_en_priv *priv = netdev_priv(dev); 1733 struct mlx4_en_priv *priv = netdev_priv(dev);
1734 1734
1735 memset(channel, 0, sizeof(*channel));
1736
1737 channel->max_rx = MAX_RX_RINGS; 1735 channel->max_rx = MAX_RX_RINGS;
1738 channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; 1736 channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
1739 1737
@@ -1752,10 +1750,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
1752 int xdp_count; 1750 int xdp_count;
1753 int err = 0; 1751 int err = 0;
1754 1752
1755 if (channel->other_count || channel->combined_count || 1753 if (!channel->tx_count || !channel->rx_count)
1756 channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
1757 channel->rx_count > MAX_RX_RINGS ||
1758 !channel->tx_count || !channel->rx_count)
1759 return -EINVAL; 1754 return -EINVAL;
1760 1755
1761 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 1756 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c44931f..8258d08acd8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
222 return; 222 return;
223 223
224 mlx4_stop_catas_poll(dev); 224 mlx4_stop_catas_poll(dev);
225 if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
226 mlx4_is_slave(dev)) {
227 /* In mlx4_remove_one on a VF */
228 u32 slave_read =
229 swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
230
231 if (mlx4_comm_internal_err(slave_read)) {
232 mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
233 __func__);
234 mlx4_enter_error_state(dev->persist);
235 }
236 }
225 mutex_lock(&intf_mutex); 237 mutex_lock(&intf_mutex);
226 238
227 list_for_each_entry(intf, &intf_list, list) 239 list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8a5923..086920b615af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
 		    enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3797cc7c1288..caa837e5e2b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	if (cmd->cmdif_rev > CMD_IF_REV) {
 		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
 			CMD_IF_REV, cmd->cmdif_rev);
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
 		goto err_free_page;
 	}
 
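This hunk is the first of many in this series that swap -ENOTSUPP for -EOPNOTSUPP. ENOTSUPP (524) is a kernel-internal value with no userspace definition, so it surfaces as "Unknown error 524"; EOPNOTSUPP (95) is the errno userspace actually understands. A quick userspace demonstration, defining the internal value locally since <errno.h> does not provide it:

/* Show what userspace sees for each value; 524 is defined here only to
 * demonstrate the problem, it is not a userspace errno.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define KERNEL_ENOTSUPP 524	/* kernel-internal, absent from <errno.h> */

int main(void)
{
	printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));
	printf("ENOTSUPP:   %s\n", strerror(KERNEL_ENOTSUPP));
	return 0;
}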
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 951dbd58594d..d5ecb8f53fd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f0b460f47f29..0523ed47f597 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
 	for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	int err;
 
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_dbcnl_validate_ets(netdev, ets);
 	if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct ieee_ets ets;
 	struct ieee_pfc pfc;
-	int err = -ENOTSUPP;
+	int err = -EOPNOTSUPP;
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 
+	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+		netdev_err(netdev, "%s, ets is not supported\n", __func__);
+		return;
+	}
+
 	if (priority >= CEE_DCBX_MAX_PRIO) {
 		netdev_err(netdev,
 			   "%s, priority is out of range\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 33a399a8b5d5..bb67863aa361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -543,7 +543,6 @@ static int mlx5e_set_channels(struct net_device *dev,
 			     struct ethtool_channels *ch)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int ncv = mlx5e_get_max_num_channels(priv->mdev);
 	unsigned int count = ch->combined_count;
 	bool arfs_enabled;
 	bool was_opened;
@@ -554,16 +553,6 @@ static int mlx5e_set_channels(struct net_device *dev,
 			    __func__);
 		return -EINVAL;
 	}
-	if (ch->rx_count || ch->tx_count) {
-		netdev_info(dev, "%s: separate rx/tx count not supported\n",
-			    __func__);
-		return -EINVAL;
-	}
-	if (count > ncv) {
-		netdev_info(dev, "%s: count (%d) > max (%d)\n",
-			    __func__, count, ncv);
-		return -EINVAL;
-	}
 
 	if (priv->params.num_channels == count)
 		return 0;
@@ -606,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -631,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int i;
 
 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	mutex_lock(&priv->state_lock);
 
@@ -991,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
 	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-	int i;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+	int tt;
 
 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
 
-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+	}
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -1007,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	bool hash_changed = false;
 	void *in;
 
 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1028,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
 	}
 
-	if (key)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    hfunc != priv->params.rss_hfunc) {
+		priv->params.rss_hfunc = hfunc;
+		hash_changed = true;
+	}
+
+	if (key) {
 		memcpy(priv->params.toeplitz_hash_key, key,
 		       sizeof(priv->params.toeplitz_hash_key));
+		hash_changed = hash_changed ||
+			       priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+	}
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-		priv->params.rss_hfunc = hfunc;
-
-	mlx5e_modify_tirs_hash(priv, in, inlen);
+	if (hash_changed)
+		mlx5e_modify_tirs_hash(priv, in, inlen);
 
 	mutex_unlock(&priv->state_lock);
 
@@ -1307,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	u32 mlx5_wol_mode;
 
 	if (!wol_supported)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (wol->wolopts & ~wol_supported)
 		return -EINVAL;
@@ -1437,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 
 	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
 	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (!rx_mode_changed)
 		return 0;
@@ -1463,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 	bool reset;
 
 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
 		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
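The mlx5e_set_rxfh() rework above boils down to one decision: re-program the TIR hash only when the effective configuration actually changed, i.e. when a different hash function was requested, or when a new Toeplitz key arrives while Toeplitz is (or becomes) the active function. A condensed model of that decision, with simplified stand-in types:

/* Condensed model of the hash_changed logic above; enum values are
 * stand-ins for the ETH_RSS_HASH_* constants.
 */
#include <stdbool.h>

enum hfunc { HASH_NO_CHANGE, HASH_XOR, HASH_TOP /* Toeplitz */ };

static bool hash_needs_update(enum hfunc cur, enum hfunc req, bool new_key)
{
	enum hfunc eff = (req == HASH_NO_CHANGE) ? cur : req;
	bool changed = (eff != cur);	/* hash function switched */

	if (new_key && eff == HASH_TOP)	/* the key only matters for Toeplitz */
		changed = true;
	return changed;
}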
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1fe80de5d68f..a0e5a69402b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 					       MLX5_FLOW_NAMESPACE_KERNEL);
 
 	if (!priv->fs.ns)
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	err = mlx5e_arfs_create_tables(priv);
 	if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d088effd7160..f33f72d0237c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
 	ns = mlx5_get_flow_namespace(priv->mdev,
 				     MLX5_FLOW_NAMESPACE_ETHTOOL);
 	if (!ns)
-		return ERR_PTR(-ENOTSUPP);
+		return ERR_PTR(-EOPNOTSUPP);
 
 	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
 						       flow_table_properties_nic_receive.log_max_ft_size)),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2b7dd315020c..f14ca3385fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
 	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt)
 {
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
 	MLX5_SET(tirc, tirc, rx_hash_fn,
 		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
 	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
 		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
 		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
 	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	default:
+		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+	}
 }
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
 				      enum mlx5e_traffic_types tt)
 {
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
 	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
 	mlx5e_build_tir_ctx_lro(tirc, priv);
 
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
-
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	default:
-		WARN_ONCE(true,
-			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-	}
+	mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
 	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
 	    !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	    < 3) {
 		mlx5_core_warn(mdev,
 			       "Not creating net device, some required device capabilities are missing\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
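The large move above folds the per-traffic-type field selection into mlx5e_build_indir_tir_ctx_hash(), so both the TIR-creation path and the ethtool modify path program the same selectors, and the modify path can memset the context between iterations without losing them. The shape of the selection, condensed into a standalone mask lookup with illustrative bit values:

/* Condensed model of the per-traffic-type RSS field selection; the
 * SEL_* bits and traffic types are illustrative stand-ins.
 */
#include <stdint.h>

enum tt { TT_IPV4_TCP, TT_IPV4_UDP, TT_IPV4, TT_OTHER };

#define SEL_SRC_IP	(1u << 0)
#define SEL_DST_IP	(1u << 1)
#define SEL_L4_SPORT	(1u << 2)
#define SEL_L4_DPORT	(1u << 3)

#define HASH_IP		(SEL_SRC_IP | SEL_DST_IP)
#define HASH_IP_PORTS	(HASH_IP | SEL_L4_SPORT | SEL_L4_DPORT)

static uint32_t rss_fields(enum tt t)
{
	switch (t) {
	case TT_IPV4_TCP:
	case TT_IPV4_UDP:
		return HASH_IP_PORTS;	/* hash the full 4-tuple */
	case TT_IPV4:
		return HASH_IP;		/* address pair only */
	default:
		return 0;		/* unknown type: select nothing */
	}
}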
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0e2fb3ed1790..06d5e6fecb0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -193,6 +193,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 		return false;
 	}
 
+	if (unlikely(page_is_pfmemalloc(dma_info->page)))
+		return false;
+
 	cache->page_cache[cache->tail] = *dma_info;
 	cache->tail = tail_next;
 	return true;
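Pages served from the emergency reserves are marked pfmemalloc, and the network stack drops packets built on them for ordinary sockets; keeping such a page in the RX recycle cache would therefore keep producing doomed packets, which is why the hunk above refuses to cache them. A small sketch of a ring cache whose put path rejects tainted entries (layout and names are illustrative):

/* Ring page-cache "put" that refuses reserve-backed pages, in the
 * spirit of the pfmemalloc check above.
 */
#include <stdbool.h>
#include <stddef.h>

struct page_like { bool pfmemalloc; };
struct ring_cache { struct page_like *slot[64]; size_t head, tail; };

static bool cache_put(struct ring_cache *c, struct page_like *p)
{
	size_t next = (c->tail + 1) % 64;

	if (next == c->head)
		return false;	/* cache full */
	if (p->pfmemalloc)
		return false;	/* never recycle reserve pages */
	c->slot[c->tail] = p;
	c->tail = next;
	return true;
}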
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 46bef6a26a8c..c5282b6aba8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   __be32 *saddr,
 				   int *out_ttl)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
 	int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
-
-	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-		ip_rt_put(rt);
-		return -EOPNOTSUPP;
-	}
+	/* if the egress device isn't on the same HW e-switch, we use the uplink */
+	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	else
+		*out_dev = rt->dst.dev;
 
 	ttl = ip4_dst_hoplimit(&rt->dst);
 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 	*out_n = n;
 	*saddr = fl4->saddr;
 	*out_ttl = ttl;
-	*out_dev = rt->dst.dev;
 
 	return 0;
 }
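Instead of refusing to offload when the route's egress device sits on a different e-switch, the lookup above now falls back to the uplink representor, which turns a hard -EOPNOTSUPP into a device-selection policy. Reduced to its shape (types and the predicate are stand-ins):

/* Shape of the egress-device selection after the change above. */
#include <stdbool.h>

struct netdev;

static struct netdev *pick_egress(struct netdev *route_dev,
				  struct netdev *uplink,
				  bool same_eswitch)
{
	/* not on the same HW e-switch: steer via the uplink instead */
	return same_eswitch ? route_dev : uplink;
}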
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f14d9c9ba773..d0c8bf014453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 
 	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
 	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
 		  vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
 	}
 
 	flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
 		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 	}
 
 	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 03293ed1cc22..595f7c7383b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
 	return 0;
 
 out_notsupp:
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
 	if (!root_ns) {
 		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -EOPNOTSUPP;
 		goto ns_err;
 	}
 
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
 	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
 	if (!ns) {
 		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
 	}
 
 	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
 		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
 	}
 	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
 		if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	int vport;
 	int err;
 
+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	err = esw_create_offloads_fdb_table(esw, nvports);
 	if (err)
-		return err;
+		goto create_fdb_err;
 
 	err = esw_create_offloads_table(esw);
 	if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 			goto err_reps;
 	}
 
-	/* disable PF RoCE so missed packets don't go through RoCE steering */
-	mlx5_dev_list_lock();
-	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	return 0;
 
 err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
 
 create_ft_err:
 	esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
 
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-	/* enable back PF RoCE */
-	mlx5_dev_list_lock();
-	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
 	}
 
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
 
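Moving the PF RoCE removal to the top of esw_offloads_init() means every later failure must undo it, hence the new create_fdb_err label, and esw_offloads_stop() re-adds the IB device only after the mode switch completes. The underlying discipline is the usual goto-based unwind: an early side effect gets a matching undo on every exit path. A generic, self-contained sketch with stub functions standing in for the real calls:

/* Generic shape of the unwind introduced above; all functions are
 * illustrative stubs, not driver API.
 */
static void remove_ib_device(void) { }
static void add_ib_device(void) { }
static void destroy_fdb_table(void) { }
static int create_fdb_table(void) { return 0; }
static int create_offloads_table(void) { return 0; }

static int offloads_init_shape(void)
{
	int err;

	remove_ib_device();		/* early side effect */

	err = create_fdb_table();
	if (err)
		goto create_fdb_err;

	err = create_offloads_table();
	if (err)
		goto create_ft_err;

	return 0;

create_ft_err:
	destroy_fdb_table();
create_fdb_err:
	add_ib_device();		/* undo the early side effect */
	return err;
}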
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c4478ecd8056..b53fc85a2375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
322 flow_table_properties_nic_receive. 322 flow_table_properties_nic_receive.
323 flow_modify_en); 323 flow_modify_en);
324 if (!atomic_mod_cap) 324 if (!atomic_mod_cap)
325 return -ENOTSUPP; 325 return -EOPNOTSUPP;
326 opmod = 1; 326 opmod = 1;
327 327
328 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); 328 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0ac7a2fc916c..6346a8f5883b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
 	struct mlx5_flow_table *ft;
 
 	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-	if (!ns)
+	if (WARN_ON(!ns))
 		return -EINVAL;
 	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
 	if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d01e9f21d469..3c315eb8d270 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 		return 0;
 	}
 
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index d2ec9d232a70..fd12e0a377a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
 				    MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
 	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
 	memset(in, 0, sizeof(in));
 	return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 269e4401c342..7129c30a2ab4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
532 if (!MLX5_CAP_GEN(mdev, vport_group_manager)) 532 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
533 return -EACCES; 533 return -EACCES;
534 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) 534 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
535 return -ENOTSUPP; 535 return -EOPNOTSUPP;
536 536
537 in = mlx5_vzalloc(inlen); 537 in = mlx5_vzalloc(inlen);
538 if (!in) 538 if (!in)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 01d0efa9c5c7..9e494a446b7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
 
 static int
 mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
-				  struct mlxsw_sp_nexthop_group *nh_grp)
+				  struct mlxsw_sp_nexthop_group *nh_grp,
+				  bool reallocate)
 {
 	u32 adj_index = nh_grp->adj_index; /* base */
 	struct mlxsw_sp_nexthop *nh;
@@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
 			continue;
 		}
 
-		if (nh->update) {
+		if (nh->update || reallocate) {
 			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
 							  adj_index, nh);
 			if (err)
@@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
 		/* Nothing was added or removed, so no need to reallocate. Just
 		 * update MAC on existing adjacency indexes.
 		 */
-		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
+							false);
 		if (err) {
 			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
 			goto set_trap;
@@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
 	nh_grp->adj_index_valid = 1;
 	nh_grp->adj_index = adj_index;
 	nh_grp->ecmp_size = ecmp_size;
-	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
 	if (err) {
 		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
 		goto set_trap;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 8e5cb7605b0f..873ce2cd76ba 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -297,7 +297,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_del(&p_pkt->list_entry);
 		b_last_packet = list_empty(&p_tx->active_descq);
 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
-		if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
 			struct qed_ooo_buffer *p_buffer;
 
 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 			b_last_frag =
 				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
 			tx_frag = p_pkt->bds_set[0].tx_frag;
-			if (p_ll2_conn->gsi_enable)
+			if (p_ll2_conn->conn.gsi_enable)
 				qed_ll2b_release_tx_gsi_packet(p_hwfn,
 							       p_ll2_conn->
 							       my_id,
@@ -378,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 
 		spin_unlock_irqrestore(&p_tx->lock, flags);
 		tx_frag = p_pkt->bds_set[0].tx_frag;
-		if (p_ll2_conn->gsi_enable)
+		if (p_ll2_conn->conn.gsi_enable)
 			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
 							p_ll2_conn->my_id,
 							p_pkt->cookie,
@@ -550,7 +550,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-		if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
 			struct qed_ooo_buffer *p_buffer;
 
 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -738,7 +738,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
 		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
 					       p_buffer->vlan, bd_flags,
 					       l4_hdr_offset_w,
-					       p_ll2_conn->tx_dest, 0,
+					       p_ll2_conn->conn.tx_dest, 0,
 					       first_frag,
 					       p_buffer->packet_length,
 					       p_buffer, true);
@@ -858,7 +858,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
 	u16 buf_idx;
 	int rc = 0;
 
-	if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
 		return rc;
 
 	if (!rx_num_ooo_buffers)
@@ -901,7 +901,7 @@ static void
 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
 				 struct qed_ll2_info *p_ll2_conn)
 {
-	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
 		return;
 
 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -913,7 +913,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
 {
 	struct qed_ooo_buffer *p_buffer;
 
-	if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
 		return;
 
 	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -945,23 +945,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
 {
 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
 	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
-	struct qed_ll2_info *ll2_info;
+	struct qed_ll2_conn ll2_info;
 	int rc;
 
-	ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
-	if (!ll2_info)
-		return -ENOMEM;
-	ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
-	ll2_info->mtu = params->mtu;
-	ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
-	ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
-	ll2_info->tx_tc = OOO_LB_TC;
-	ll2_info->tx_dest = CORE_TX_DEST_LB;
-
-	rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
+	ll2_info.mtu = params->mtu;
+	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+	ll2_info.tx_tc = OOO_LB_TC;
+	ll2_info.tx_dest = CORE_TX_DEST_LB;
+
+	rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
 					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
 					handle);
-	kfree(ll2_info);
 	if (rc) {
 		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
 		goto out;
@@ -1006,7 +1002,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 				     struct qed_ll2_info *p_ll2_conn,
 				     u8 action_on_error)
 {
-	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
 	struct core_rx_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -1032,7 +1028,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->sb_index = p_rx->rx_sb_index;
 	p_ramrod->complete_event_flg = 1;
 
-	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
 	DMA_REGPAIR_LE(p_ramrod->bd_base,
 		       p_rx->rxq_chain.p_phys_addr);
 	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
@@ -1040,8 +1036,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
 		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
-	p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
-	p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+	p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
+	p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
 	p_ramrod->queue_id = p_ll2_conn->queue_id;
 	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
 									  : 1;
@@ -1056,14 +1052,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	}
 
 	p_ramrod->action_on_error.error_type = action_on_error;
-	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 				     struct qed_ll2_info *p_ll2_conn)
 {
-	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
 	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
 	struct core_tx_start_ramrod_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -1075,7 +1071,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
 		return 0;
 
-	if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
 		p_ll2_conn->tx_stats_en = 0;
 	else
 		p_ll2_conn->tx_stats_en = 1;
@@ -1096,7 +1092,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 
 	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
 	p_ramrod->sb_index = p_tx->tx_sb_index;
-	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
 	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
 	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
 
@@ -1106,7 +1102,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
 
 	memset(&pq_params, 0, sizeof(pq_params));
-	pq_params.core.tc = p_ll2_conn->tx_tc;
+	pq_params.core.tc = p_ll2_conn->conn.tx_tc;
 	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
 
@@ -1123,7 +1119,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
 	}
 
-	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
@@ -1224,7 +1220,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
 		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
-		   p_ll2_info->conn_type, rx_num_desc);
+		   p_ll2_info->conn.conn_type, rx_num_desc);
 
 out:
 	return rc;
@@ -1262,7 +1258,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
 		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
-		   p_ll2_info->conn_type, tx_num_desc);
+		   p_ll2_info->conn.conn_type, tx_num_desc);
 
 out:
 	if (rc)
@@ -1273,7 +1269,7 @@ out:
 }
 
 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-			       struct qed_ll2_info *p_params,
+			       struct qed_ll2_conn *p_params,
 			       u16 rx_num_desc,
 			       u16 tx_num_desc,
 			       u8 *p_connection_handle)
@@ -1302,15 +1298,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
 	if (!p_ll2_info)
 		return -EBUSY;
 
-	p_ll2_info->conn_type = p_params->conn_type;
-	p_ll2_info->mtu = p_params->mtu;
-	p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
-	p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
-	p_ll2_info->tx_tc = p_params->tx_tc;
-	p_ll2_info->tx_dest = p_params->tx_dest;
-	p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
-	p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
-	p_ll2_info->gsi_enable = p_params->gsi_enable;
+	p_ll2_info->conn = *p_params;
 
 	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
 	if (rc)
@@ -1371,9 +1359,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
 
 	SET_FIELD(action_on_error,
 		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
-		  p_ll2_conn->ai_err_packet_too_big);
+		  p_ll2_conn->conn.ai_err_packet_too_big);
 	SET_FIELD(action_on_error,
-		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
 
 	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
 }
@@ -1600,7 +1588,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
 		   p_ll2->queue_id,
 		   p_ll2->cid,
-		   p_ll2->conn_type,
+		   p_ll2->conn.conn_type,
 		   prod_idx,
 		   first_frag_len,
 		   num_of_bds,
@@ -1676,7 +1664,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
 		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
 		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
 		   p_ll2_conn->queue_id,
-		   p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+		   p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
 }
 
 int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
@@ -1817,7 +1805,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
 	}
 
-	if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
 		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
 
 	return rc;
@@ -1993,7 +1981,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
 
 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 {
-	struct qed_ll2_info ll2_info;
+	struct qed_ll2_conn ll2_info;
 	struct qed_ll2_buffer *buffer, *tmp_buffer;
 	enum qed_ll2_conn_type conn_type;
 	struct qed_ptt *p_ptt;
@@ -2041,6 +2029,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 
 	/* Prepare the temporary ll2 information */
 	memset(&ll2_info, 0, sizeof(ll2_info));
+
 	ll2_info.conn_type = conn_type;
 	ll2_info.mtu = params->mtu;
 	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
@@ -2120,7 +2109,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 	}
 
 	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
-
 	return 0;
 
 release_terminate_all:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 6625a3ae5a33..31417928b635 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -112,15 +112,8 @@ struct qed_ll2_tx_queue {
 	bool b_completing_packet;
 };
 
-struct qed_ll2_info {
-	/* Lock protecting the state of LL2 */
-	struct mutex mutex;
+struct qed_ll2_conn {
 	enum qed_ll2_conn_type conn_type;
-	u32 cid;
-	u8 my_id;
-	u8 queue_id;
-	u8 tx_stats_id;
-	bool b_active;
 	u16 mtu;
 	u8 rx_drop_ttl0_flg;
 	u8 rx_vlan_removal_en;
@@ -128,10 +121,21 @@ struct qed_ll2_info {
 	enum core_tx_dest tx_dest;
 	enum core_error_handle ai_err_packet_too_big;
 	enum core_error_handle ai_err_no_buf;
+	u8 gsi_enable;
+};
+
+struct qed_ll2_info {
+	/* Lock protecting the state of LL2 */
+	struct mutex mutex;
+	struct qed_ll2_conn conn;
+	u32 cid;
+	u8 my_id;
+	u8 queue_id;
+	u8 tx_stats_id;
+	bool b_active;
 	u8 tx_stats_en;
 	struct qed_ll2_rx_queue rx_queue;
 	struct qed_ll2_tx_queue tx_queue;
-	u8 gsi_enable;
 };
 
 /**
@@ -149,7 +153,7 @@ struct qed_ll2_info {
 * @return 0 on success, failure otherwise
 */
 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-			       struct qed_ll2_info *p_params,
+			       struct qed_ll2_conn *p_params,
			       u16 rx_num_desc,
			       u16 tx_num_desc,
			       u8 *p_connection_handle);
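
The hunks above split the caller-tunable connection parameters out of struct qed_ll2_info into the new struct qed_ll2_conn, so acquisition can capture them with one struct assignment (p_ll2_info->conn = *p_params) instead of a field-by-field copy. A minimal user-space sketch of this embed-and-copy pattern; the names below are illustrative, not taken from the driver:

    /* Caller-visible connection parameters: plain data, safe to copy. */
    struct conn_params {
    	int conn_type;
    	unsigned short mtu;
    	unsigned char gsi_enable;
    };

    /* Driver-private state embeds the parameter block whole. */
    struct conn_info {
    	struct conn_params conn;   /* copied wholesale from the caller */
    	unsigned int cid;          /* driver-owned bookkeeping stays outside */
    	unsigned char queue_id;
    };

    static void conn_acquire(struct conn_info *info,
    			 const struct conn_params *params)
    {
    	/* One struct assignment replaces nine per-field copies. */
    	info->conn = *params;
    }
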
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 2a16547c8966..2dbdb3298991 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
 {
 	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
 	struct qed_roce_ll2_info *roce_ll2;
-	struct qed_ll2_info ll2_params;
+	struct qed_ll2_conn ll2_params;
 	int rc;
 
 	if (!params) {
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 89ac1e3f6175..301f48755093 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	kfree(priv->rx_skb[q]);
 	priv->rx_skb[q] = NULL;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
-	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
-
 	/* Free aligned TX buffers */
 	kfree(priv->tx_align[q]);
 	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
 	return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 		spin_lock_irqsave(&priv->lock, flags);
 		/* Clear TX interrupt */
 		ravb_write(ndev, ~mask, TIS);
-		ravb_tx_free(ndev, q);
+		ravb_tx_free(ndev, q, true);
 		netif_wake_subqueue(ndev, q);
 		mmiowb();
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1567,7 +1582,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:
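
ravb_tx_free() now takes a free_txed_only flag: the poll and xmit paths pass true so only descriptors the hardware has completed are reclaimed, while ravb_ring_free() passes false to force-release everything before the ring memory is freed. A compact, compilable sketch of that two-mode reclaim loop; the types and names here are illustrative stand-ins, not the driver's:

    #include <stdbool.h>

    enum desc_state { DESC_PENDING, DESC_DONE };
    struct tx_desc { enum desc_state state; };

    /* Walk dirty..cur and reclaim descriptors; with free_txed_only set,
     * stop at the first descriptor the hardware still owns. */
    static int tx_reclaim(struct tx_desc *ring, unsigned int *dirty,
    		      unsigned int cur, unsigned int mask,
    		      bool free_txed_only)
    {
    	int freed = 0;

    	for (; cur - *dirty > 0; (*dirty)++) {
    		struct tx_desc *d = &ring[*dirty & mask];
    		bool txed = (d->state == DESC_DONE);

    		if (free_txed_only && !txed)
    			break;
    		d->state = DESC_PENDING;	/* back to "empty" for reuse */
    		freed++;
    	}
    	return freed;
    }
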
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index be3c91c7f211..5484fd726d5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
 {
 	void __iomem *ioaddr = hw->pcsr;
 	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+	u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
 	int ret = 0;
 
+	/* Discard masked bits */
+	intr_status &= ~intr_mask;
+
 	/* Not used events (e.g. MMC interrupts) are not handled. */
 	if ((intr_status & GMAC_INT_STATUS_MMCTIS))
 		x->mmc_tx_irq_n++;
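
The dwmac1000 fix reads GMAC_INT_MASK alongside GMAC_INT_STATUS and drops masked bits before acting, so events the driver deliberately masked no longer get handled. The idiom in two lines of plain C, with hypothetical values standing in for the MMIO reads:

    #include <stdint.h>

    /* Keep only interrupt causes the driver did not ask the HW to mask. */
    static uint32_t effective_irq_status(uint32_t status, uint32_t mask)
    {
    	status &= ~mask;	/* discard masked bits */
    	return status;
    }
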
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 082cd48db6a7..36942f5a6a53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -351,6 +351,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
 	if (of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 	of_node_put(plat->phy_node);
+	of_node_put(plat->mdio_node);
 }
 #else
 struct plat_stmmacenet_data *
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8b6810bad54b..99d3df788ce8 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -69,7 +69,6 @@ struct gtp_dev {
 	struct socket		*sock0;
 	struct socket		*sock1u;
 
-	struct net		*net;
 	struct net_device	*dev;
 
 	unsigned int		hash_size;
@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
 	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
 
-	xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+	xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
 
 	switch (udp_sk(sk)->encap_type) {
 	case UDP_ENCAP_GTP0:
@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 				    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
 				    pktinfo.iph->tos,
 				    ip4_dst_hoplimit(&pktinfo.rt->dst),
-				    htons(IP_DF),
+				    0,
 				    pktinfo.gtph_port, pktinfo.gtph_port,
 				    true, false);
 		break;
@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
 static void gtp_hashtable_free(struct gtp_dev *gtp);
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-			    int fd_gtp0, int fd_gtp1, struct net *src_net);
+			    int fd_gtp0, int fd_gtp1);
 
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
 		       struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
 	fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
 	fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
 
-	err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+	err = gtp_encap_enable(dev, gtp, fd0, fd1);
 	if (err < 0)
 		goto out_err;
 
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
 }
 
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-			    int fd_gtp0, int fd_gtp1, struct net *src_net)
+			    int fd_gtp0, int fd_gtp1)
 {
 	struct udp_tunnel_sock_cfg tuncfg = {NULL};
 	struct socket *sock0, *sock1u;
@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
 
 	gtp->sock0 = sock0;
 	gtp->sock1u = sock1u;
-	gtp->net = src_net;
 
 	tuncfg.sk_user_data = gtp;
 	tuncfg.encap_rcv = gtp_encap_recv;
@@ -1376,3 +1374,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
 MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");
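
The gtp changes drop the cached struct net pointer and derive the namespace from the live socket via sock_net(sk), which cannot go stale the way a pointer captured at newlink time can. As a rough user-space analogy (all names hypothetical), prefer reading ownership from the object in hand over a copy cached at setup:

    struct owner { int ns_id; };
    struct sock_like { struct owner *owner; };

    /* Read the namespace from the socket each time, as sock_net(sk)
     * does, instead of trusting a value cached when the device was
     * created. */
    static int current_ns(const struct sock_like *sk)
    {
    	return sk->owner->ns_id;
    }
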
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5a1cc089acb7..86e5749226ef 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1295,6 +1295,9 @@ void netvsc_channel_cb(void *context)
 	ndev = hv_get_drvdata(device);
 	buffer = get_per_channel_state(channel);
 
+	/* commit_rd_index() -> hv_signal_on_read() needs this. */
+	init_cached_read_index(channel);
+
 	do {
 		desc = get_next_pkt_raw(channel);
 		if (desc != NULL) {
@@ -1347,6 +1350,9 @@ void netvsc_channel_cb(void *context)
 
 			bufferlen = bytes_recvd;
 		}
+
+		init_cached_read_index(channel);
+
 	} while (1);
 
 	if (bufferlen > NETVSC_PACKET_SIZE)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5c26653eceb5..402618565838 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -825,7 +825,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 			return -EINVAL;
 
 		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
-					    macvtap_is_little_endian(q)))
+					    macvtap_is_little_endian(q), true))
 			BUG();
 
 		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e741bf614c4e..b0492ef2cdaa 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
 MODULE_LICENSE("GPL");
 
+static int bcm63xx_config_intr(struct phy_device *phydev)
+{
+	int reg, err;
+
+	reg = phy_read(phydev, MII_BCM63XX_IR);
+	if (reg < 0)
+		return reg;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		reg &= ~MII_BCM63XX_IR_GMASK;
+	else
+		reg |= MII_BCM63XX_IR_GMASK;
+
+	err = phy_write(phydev, MII_BCM63XX_IR, reg);
+	return err;
+}
+
 static int bcm63xx_config_init(struct phy_device *phydev)
 {
 	int reg, err;
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
-	.config_intr	= bcm_phy_config_intr,
+	.config_intr	= bcm63xx_config_intr,
 }, {
 	/* same phy as above, with just a different OUI */
 	.phy_id		= 0x002bdc00,
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
-	.config_intr	= bcm63xx_config_intr,
+	.config_intr	= bcm63xx_config_intr,
 } };
 
 module_phy_driver(bcm63xx_driver);
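
The bcm63xx driver gains its own config_intr because this PHY's interrupt register uses a global mask bit whose sense is inverted: setting the bit disables interrupts. A read-modify-write sketch of that inverted-mask handling; the bit position used here is hypothetical, only the inversion matters:

    #include <stdint.h>

    #define IR_GMASK (1u << 15)	/* hypothetical global-mask bit */

    /* Mirrors bcm63xx_config_intr(): clearing the mask bit ENABLES
     * interrupts, setting it DISABLES them. */
    static uint16_t ir_update(uint16_t reg, int enable)
    {
    	if (enable)
    		reg &= ~IR_GMASK;
    	else
    		reg |= IR_GMASK;
    	return reg;
    }
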
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 800b39f06279..a10d0e7fc5f7 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -17,6 +17,7 @@
 #include <linux/phy.h>
 
 #define TI_DP83848C_PHY_ID		0x20005ca0
+#define TI_DP83620_PHY_ID		0x20005ce0
 #define NS_DP83848C_PHY_ID		0x20005c90
 #define TLK10X_PHY_ID			0x2000a210
 #define TI_DP83822_PHY_ID		0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
 static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
 	{ TI_DP83848C_PHY_ID, 0xfffffff0 },
 	{ NS_DP83848C_PHY_ID, 0xfffffff0 },
+	{ TI_DP83620_PHY_ID, 0xfffffff0 },
 	{ TLK10X_PHY_ID, 0xfffffff0 },
 	{ TI_DP83822_PHY_ID, 0xfffffff0 },
 	{ }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
 static struct phy_driver dp83848_driver[] = {
 	DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
 	DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+	DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
 	DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
 	DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
 };
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 0b78210c0fa7..ed0d235cf850 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1679,6 +1679,8 @@ static struct phy_driver marvell_drivers[] = {
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.did_interrupt = &m88e1121_did_interrupt,
+		.get_wol = &m88e1318_get_wol,
+		.set_wol = &m88e1318_set_wol,
 		.resume = &marvell_resume,
 		.suspend = &marvell_suspend,
 		.get_sset_count = marvell_get_sset_count,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9a77289109b7..6742070ca676 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = {
 	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
+}, {
+	.phy_id		= PHY_ID_KSZ8795,
+	.phy_id_mask	= MICREL_PHY_ID_MASK,
+	.name		= "Micrel KSZ8795",
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+	.config_init	= kszphy_config_init,
+	.config_aneg	= ksz8873mll_config_aneg,
+	.read_status	= ksz8873mll_read_status,
+	.get_sset_count = kszphy_get_sset_count,
+	.get_strings	= kszphy_get_strings,
+	.get_stats	= kszphy_get_stats,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
 } };
 
 module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 48da6e93c3f7..7cc1b7dcfe05 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -29,6 +29,7 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mdio.h>
@@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev)
 * phy_trigger_machine - trigger the state machine to run
 *
 * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
 *
 * Description: There has been a change in state which requires that the
 * state machine runs.
 */
 
-static void phy_trigger_machine(struct phy_device *phydev)
+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
-	cancel_delayed_work_sync(&phydev->state_queue);
+	if (sync)
+		cancel_delayed_work_sync(&phydev->state_queue);
+	else
+		cancel_delayed_work(&phydev->state_queue);
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
 }
 
@@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev)
 	phydev->state = PHY_HALTED;
 	mutex_unlock(&phydev->lock);
 
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, false);
 }
 
 /**
@@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev)
 	}
 
 	/* reschedule state queue work to run as soon as possible */
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, true);
 	return;
 
 ignore:
@@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev)
 	if (do_resume)
 		phy_resume(phydev);
 
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, true);
 }
 EXPORT_SYMBOL(phy_start);
 
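
phy_trigger_machine() gains a sync flag because phy_error() can fire from inside the state-machine work item itself; a synchronous cancel there would wait on the currently-running work and deadlock, so that caller passes false. A minimal sketch of the pattern with stub cancel functions standing in for cancel_delayed_work() and cancel_delayed_work_sync():

    #include <stdbool.h>

    static void cancel_async(void) { /* request cancel, do not wait */ }
    static void cancel_sync(void)  { /* wait for a running instance */ }

    /* Callers that may already be executing inside the work item must
     * pass sync == false, or the synchronous cancel would wait on
     * itself. */
    static void trigger_machine(bool sync)
    {
    	if (sync)
    		cancel_sync();
    	else
    		cancel_async();
    	/* ...then (re)queue the state machine to run immediately. */
    }
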
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index fa62bdf2f526..94ca42e630bb 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -12,6 +12,7 @@
 */
 #include <linux/leds.h>
 #include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
 #include <linux/netdevice.h>
 
 static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy)
 					    sizeof(struct phy_led_trigger) *
 					    phy->phy_num_led_triggers,
 					    GFP_KERNEL);
-	if (!phy->phy_led_triggers)
-		return -ENOMEM;
+	if (!phy->phy_led_triggers) {
+		err = -ENOMEM;
+		goto out_clear;
+	}
 
 	for (i = 0; i < phy->phy_num_led_triggers; i++) {
 		err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
@@ -120,6 +123,8 @@ out_unreg:
 	while (i--)
 		phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
 	devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+out_clear:
+	phy->phy_num_led_triggers = 0;
 	return err;
 }
 EXPORT_SYMBOL_GPL(phy_led_triggers_register);
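
The phy_led_triggers fix routes every failure through an out_clear label that zeroes phy_num_led_triggers, so later code never iterates a trigger array that was never (or only partially) set up. The goto-unwind shape, reduced to a compilable sketch with illustrative types:

    #include <stdlib.h>

    struct dev { int ntrig; void *trigs; };

    static int triggers_register(struct dev *d, int n)
    {
    	int err = 0;

    	d->ntrig = n;
    	d->trigs = calloc((size_t)n, sizeof(int));
    	if (!d->trigs) {
    		err = -12;		/* -ENOMEM */
    		goto out_clear;
    	}
    	return 0;

    out_clear:
    	d->ntrig = 0;	/* consumers see a consistent "no triggers" state */
    	return err;
    }
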
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cd8e02c94be0..2cd10b26b650 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1360,7 +1360,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 			return -EINVAL;
 
 		if (virtio_net_hdr_from_skb(skb, &gso,
-					    tun_is_little_endian(tun))) {
+					    tun_is_little_endian(tun), true)) {
 			struct skb_shared_info *sinfo = skb_shinfo(skb);
 			pr_err("unexpected GSO type: "
 			       "0x%x, gso_size %d, hdr_len %d\n",
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fe7b2886cb6b..86144f9a80ee 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
 #define SAMSUNG_VENDOR_ID	0x04e8
 #define LENOVO_VENDOR_ID	0x17ef
 #define NVIDIA_VENDOR_ID	0x0955
+#define HP_VENDOR_ID		0x03f0
 
 static const struct usb_device_id	products[] = {
 /* BLACKLIST !!
@@ -677,6 +678,13 @@ static const struct usb_device_id	products[] = {
 	.driver_info = 0,
 },
 
+/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
+				      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+
 /* AnyDATA ADU960S - handled by qmi_wwan */
 {
	USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6fe1cdb0174f..24d5272cdce5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
 					      USB_CDC_PROTO_NONE),
 		.driver_info        = (unsigned long)&qmi_wwan_info,
 	},
+	{	/* HP lt2523 (Novatel E371) */
+		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
+					      USB_CLASS_COMM,
+					      USB_CDC_SUBCLASS_ETHERNET,
+					      USB_CDC_PROTO_NONE),
+		.driver_info        = (unsigned long)&qmi_wwan_info,
+	},
 	{	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 		USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
 		.driver_info = (unsigned long)&qmi_wwan_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f3b48ad90865..ad42295356dd 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION		"08"
 
 /* Information for net */
-#define NET_VERSION		"6"
+#define NET_VERSION		"8"
 
 #define DRIVER_VERSION		"v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 		if (!list_empty(&tp->rx_done))
 			napi_schedule(napi);
+		else if (!skb_queue_empty(&tp->tx_queue) &&
+			 !list_empty(&tp->tx_free))
+			napi_schedule(napi);
 	}
 
 	return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
 		if (!netif_carrier_ok(netdev)) {
 			tp->rtl_ops.enable(tp);
 			set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+			netif_stop_queue(netdev);
 			napi_disable(&tp->napi);
 			netif_carrier_on(netdev);
 			rtl_start_rx(tp);
 			napi_enable(&tp->napi);
+			netif_wake_queue(netdev);
+			netif_info(tp, link, netdev, "carrier on\n");
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
 			napi_disable(&tp->napi);
 			tp->rtl_ops.disable(tp);
 			napi_enable(&tp->napi);
+			netif_info(tp, link, netdev, "carrier off\n");
 		}
 	}
 }
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
 	if (!netif_running(netdev))
 		return 0;
 
+	netif_stop_queue(netdev);
 	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
 	if (netif_carrier_ok(netdev)) {
-		netif_stop_queue(netdev);
 		mutex_lock(&tp->control);
 		tp->rtl_ops.disable(tp);
 		mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
 	if (netif_carrier_ok(netdev)) {
 		mutex_lock(&tp->control);
 		tp->rtl_ops.enable(tp);
+		rtl_start_rx(tp);
 		rtl8152_set_rx_mode(netdev);
 		mutex_unlock(&tp->control);
-		netif_wake_queue(netdev);
 	}
 
 	napi_enable(&tp->napi);
+	netif_wake_queue(netdev);
+	usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+	if (!list_empty(&tp->rx_done))
+		napi_schedule(&tp->napi);
 
 	return 0;
 }
@@ -3572,6 +3584,8 @@ static bool delay_autosuspend(struct r8152 *tp)
 	 */
 	if (!sw_linking && tp->rtl_ops.in_nway(tp))
 		return true;
+	else if (!skb_queue_empty(&tp->tx_queue))
+		return true;
 	else
 		return false;
 }
@@ -3581,10 +3595,15 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
 	struct net_device *netdev = tp->netdev;
 	int ret = 0;
 
+	set_bit(SELECTIVE_SUSPEND, &tp->flags);
+	smp_mb__after_atomic();
+
 	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
 		u32 rcr = 0;
 
 		if (delay_autosuspend(tp)) {
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
 			ret = -EBUSY;
 			goto out1;
 		}
@@ -3601,6 +3620,8 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
 		if (!(ocp_data & RXFIFO_EMPTY)) {
 			rxdy_gated_en(tp, false);
 			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
 			ret = -EBUSY;
 			goto out1;
 		}
@@ -3620,8 +3641,6 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp)
 		}
 	}
 
-	set_bit(SELECTIVE_SUSPEND, &tp->flags);
-
 out1:
 	return ret;
 }
@@ -3677,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
 	if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
 		if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
 			tp->rtl_ops.autosuspend_en(tp, false);
-			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
 			napi_disable(&tp->napi);
 			set_bit(WORK_ENABLE, &tp->flags);
 			if (netif_carrier_ok(tp->netdev))
 				rtl_start_rx(tp);
 			napi_enable(&tp->napi);
+			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+			smp_mb__after_atomic();
+			if (!list_empty(&tp->rx_done))
+				napi_schedule(&tp->napi);
 		} else {
 			tp->rtl_ops.up(tp);
 			netif_carrier_off(tp->netdev);
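
The r8152 suspend rework publishes SELECTIVE_SUSPEND before re-checking whether suspend must abort, rolls it back on any abort path, and pairs every set/clear with smp_mb__after_atomic() so other CPUs observe the flag in order with the queue state. A user-space analogue using C11 atomics, where seq_cst ordering stands in for the kernel barrier (flow and names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool selective_suspend;

    /* Publish the flag first, then re-check conditions; roll the flag
     * back on any abort, as the runtime-suspend path now does. */
    static int runtime_suspend(bool busy)
    {
    	atomic_store(&selective_suspend, true);
    	if (busy) {				/* e.g. TX still queued */
    		atomic_store(&selective_suspend, false);
    		return -16;			/* -EBUSY */
    	}
    	return 0;
    }
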
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4a105006ca63..765c2d6358da 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -48,8 +48,16 @@ module_param(gso, bool, 0444);
 */
 DECLARE_EWMA(pkt_len, 1, 64)
 
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
 /* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
 
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
@@ -1104,7 +1112,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
 	hdr = skb_vnet_hdr(skb);
 
 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
-				    virtio_is_little_endian(vi->vdev)))
+				    virtio_is_little_endian(vi->vdev), false))
 		BUG();
 
 	if (vi->mergeable_rx_bufs)
@@ -1707,6 +1715,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 	u16 xdp_qp = 0, curr_qp;
 	int i, err;
 
+	if (prog && prog->xdp_adjust_head) {
+		netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
@@ -1890,8 +1903,12 @@ static void free_receive_page_frags(struct virtnet_info *vi)
 		put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_queue(struct virtnet_info *vi, int q)
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
 {
+	/* For small receive mode always use kfree_skb variants */
+	if (!vi->mergeable_rx_bufs)
+		return false;
+
 	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
 		return false;
 	else if (q < vi->curr_queue_pairs)
@@ -1908,7 +1925,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		struct virtqueue *vq = vi->sq[i].vq;
 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-			if (!is_xdp_queue(vi, i))
+			if (!is_xdp_raw_buffer_queue(vi, i))
 				dev_kfree_skb(buf);
 			else
 				put_page(virt_to_head_page(buf));
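
The mergeable-buffer alignment change reserves enough low address bits to encode a true size of up to one page: aligning to 2^((PAGE_SHIFT+1)/2) leaves at least PAGE_SHIFT/2 free bits, and with a size granularity equal to the alignment those bits cover the whole page. A compilable sketch of the arithmetic, assuming 4 KiB pages and 64-byte cache lines (both are assumptions, not from the patch):

    #include <stdio.h>

    #define PAGE_SHIFT 12		/* assume 4 KiB pages */
    #define L1_CACHE_BYTES 64	/* assume 64-byte cache lines */
    #define MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
    #define ALIGN_BYTES (L1_CACHE_BYTES > (1 << MIN_ALIGN_SHIFT) ? \
    		     L1_CACHE_BYTES : (1 << MIN_ALIGN_SHIFT))

    int main(void)
    {
    	/* With 4 KiB pages: shift = (12 + 1) / 2 = 6, so alignment is
    	 * 64 bytes and 6 low bits encode sizes in 64-byte units, which
    	 * reaches 64 * 64 = 4096 bytes = one full page. */
    	printf("min align shift: %d\n", MIN_ALIGN_SHIFT);
    	printf("buffer alignment: %d bytes\n", ALIGN_BYTES);
    	return 0;
    }
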
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ca7196c40060..50b62db213b0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2268,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg)
 				= container_of(p, struct vxlan_fdb, hlist);
 			unsigned long timeout;
 
-			if (f->state & NUD_PERMANENT)
+			if (f->state & (NUD_PERMANENT | NUD_NOARP))
 				continue;
 
 			timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2354,7 +2354,7 @@ static int vxlan_open(struct net_device *dev)
 }
 
 /* Purge the forwarding table */
-static void vxlan_flush(struct vxlan_dev *vxlan)
+static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
 {
 	unsigned int h;
 
@@ -2364,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
 		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
 			struct vxlan_fdb *f
 				= container_of(p, struct vxlan_fdb, hlist);
+			if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
+				continue;
 			/* the all_zeros_mac entry is deleted at vxlan_uninit */
 			if (!is_zero_ether_addr(f->eth_addr))
 				vxlan_fdb_destroy(vxlan, f);
@@ -2385,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev)
 
 	del_timer_sync(&vxlan->age_timer);
 
-	vxlan_flush(vxlan);
+	vxlan_flush(vxlan, false);
 	vxlan_sock_release(vxlan);
 
 	return ret;
@@ -2890,7 +2892,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 	memcpy(&vxlan->cfg, conf, sizeof(*conf));
 	if (!vxlan->cfg.dst_port) {
 		if (conf->flags & VXLAN_F_GPE)
-			vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+			vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
 		else
 			vxlan->cfg.dst_port = default_port;
 	}
@@ -3058,6 +3060,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
+	vxlan_flush(vxlan, true);
+
 	spin_lock(&vn->sock_lock);
 	if (!hlist_unhashed(&vxlan->hlist))
 		hlist_del_rcu(&vxlan->hlist);
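
vxlan_flush() now takes a do_all flag: vxlan_stop() passes false so statically configured PERMANENT/NOARP fdb entries survive a link-down, while vxlan_dellink() passes true to purge everything before the device disappears. The gating predicate in isolation, with flag values mirroring the kernel's neighbour states:

    #include <stdbool.h>

    #define NUD_NOARP     0x40
    #define NUD_PERMANENT 0x80

    /* Return true when an fdb entry should be removed by the flush. */
    static bool should_purge(int state, bool do_all)
    {
    	if (!do_all && (state & (NUD_PERMANENT | NUD_NOARP)))
    		return false;	/* keep static entries across link down */
    	return true;
    }
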
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index d02ca1491d16..8d3e53fac1da 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -91,7 +91,7 @@
 
 #define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
-	IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+	IWL8000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL8265_FW_PRE "iwlwifi-8265-"
 #define IWL8265_MODULE_FIRMWARE(api) \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 636c8b03e318..09e9e2e3ed04 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
 		.frame_limit = IWL_FRAME_LIMIT,
 	};
 
-	/* Make sure reserved queue is still marked as such (or allocated) */
-	mvm->queue_info[mvm_sta->reserved_queue].status =
-		IWL_MVM_QUEUE_RESERVED;
+	/* Make sure reserved queue is still marked as such (if allocated) */
+	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+		mvm->queue_info[mvm_sta->reserved_queue].status =
+			IWL_MVM_QUEUE_RESERVED;
 
 	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
 		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 63a051be832e..bec7d9c46087 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
 		return;
 
 	IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
-	thermal_zone_device_unregister(mvm->tz_device.tzone);
-	mvm->tz_device.tzone = NULL;
+	if (mvm->tz_device.tzone) {
+		thermal_zone_device_unregister(mvm->tz_device.tzone);
+		mvm->tz_device.tzone = NULL;
+	}
 }
 
 static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
 		return;
 
 	IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
-	thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
-	mvm->cooling_dev.cdev = NULL;
+	if (mvm->cooling_dev.cdev) {
+		thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+		mvm->cooling_dev.cdev = NULL;
+	}
 }
 #endif /* CONFIG_THERMAL */
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e30ffd29b7e9..579521327b03 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
-	unsigned int num_queues = vif->num_queues;
 	unsigned long rx_bytes = 0;
 	unsigned long rx_packets = 0;
 	unsigned long tx_bytes = 0;
 	unsigned long tx_packets = 0;
 	unsigned int index;
 
+	spin_lock(&vif->lock);
 	if (vif->queues == NULL)
 		goto out;
 
 	/* Aggregate tx and rx stats from each queue */
-	for (index = 0; index < num_queues; ++index) {
+	for (index = 0; index < vif->num_queues; ++index) {
 		queue = &vif->queues[index];
 		rx_bytes += queue->stats.rx_bytes;
 		rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 	}
 
 out:
+	spin_unlock(&vif->lock);
+
 	vif->dev->stats.rx_bytes = rx_bytes;
 	vif->dev->stats.rx_packets = rx_packets;
 	vif->dev->stats.tx_bytes = tx_bytes;
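
Taking vif->lock around the stats walk pairs with backend_disconnect() in the next file, which frees and NULLs vif->queues under the same lock, so the reader can no longer race with the array being vfree()d or with num_queues changing mid-loop. A pthread sketch of the discipline (types are illustrative stand-ins):

    #include <pthread.h>

    struct vif {
    	pthread_mutex_t lock;
    	unsigned int num_queues;
    	long *queues;		/* stand-in for the per-queue stats array */
    };

    static long sum_stats(struct vif *vif)
    {
    	long total = 0;
    	unsigned int i;

    	pthread_mutex_lock(&vif->lock);
    	if (vif->queues)	/* may have been freed by disconnect */
    		for (i = 0; i < vif->num_queues; i++)
    			total += vif->queues[i];
    	pthread_mutex_unlock(&vif->lock);
    	return total;
    }
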
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 3124eaec9427..85b742e1c42f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
 static void backend_disconnect(struct backend_info *be)
 {
 	if (be->vif) {
+		unsigned int queue_index;
+
 		xen_unregister_watchers(be->vif);
 #ifdef CONFIG_DEBUG_FS
 		xenvif_debugfs_delif(be->vif);
 #endif /* CONFIG_DEBUG_FS */
 		xenvif_disconnect_data(be->vif);
+		for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+			xenvif_deinit_queue(&be->vif->queues[queue_index]);
+
+		spin_lock(&be->vif->lock);
+		vfree(be->vif->queues);
+		be->vif->num_queues = 0;
+		be->vif->queues = NULL;
+		spin_unlock(&be->vif->lock);
+
 		xenvif_disconnect_ctrl(be->vif);
 	}
 }
@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be)
 err:
 	if (be->vif->num_queues > 0)
 		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
+	for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+		xenvif_deinit_queue(&be->vif->queues[queue_index]);
 	vfree(be->vif->queues);
 	be->vif->queues = NULL;
 	be->vif->num_queues = 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a479cd99911d..8315fe73ecd0 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 	queue->rx.req_prod_pvt = req_prod;
 
 	/* Not enough requests? Try again later. */
-	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+	if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 		return;
 	}
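
Reading the netfront change: the refill check now appears to measure how many requests this pass actually produced (private producer minus the producer last published on the shared ring) rather than total outstanding requests (producer minus response consumer), so the refill timer is re-armed whenever a batch came up short. A sketch of the two counts on free-running ring indices; the helper names are mine, not the driver's:

    /* Ring indices are free-running counters; unsigned subtraction
     * yields a correct count even across wrap-around. */
    static unsigned int added_this_pass(unsigned int pvt, unsigned int pub)
    {
    	return pvt - pub;	/* what the fixed check measures */
    }

    static unsigned int outstanding(unsigned int pvt, unsigned int cons)
    {
    	return pvt - cons;	/* what the old check measured */
    }
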
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index fcc9dcfdf675..e65041c640cb 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 		return 0;
 
 	freq->sg_table.sgl = freq->first_sgl;
-	ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
-			freq->sg_table.sgl);
+	ret = sg_alloc_table_chained(&freq->sg_table,
+			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
 	if (ret)
 		return -ENOMEM;
 
 	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
-	WARN_ON(op->nents > rq->nr_phys_segments);
+	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
 	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
 				op->nents, dir);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 6f5074153dcd..be8c800078e2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
 
+	nvmet_subsys_del_ctrls(subsys);
 	nvmet_subsys_put(subsys);
 }
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b1d66ed655c9..fc5ba2f9e15f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
 		ctrl->cntlid, ctrl->kato);
 
-	ctrl->ops->delete_ctrl(ctrl);
+	nvmet_ctrl_fatal_error(ctrl);
 }
 
 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
+	flush_work(&ctrl->async_event_work);
+	cancel_work_sync(&ctrl->fatal_err_work);
+
 	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
 	nvmet_subsys_put(subsys);
 
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
 	kfree(subsys);
 }
 
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		ctrl->ops->delete_ctrl(ctrl);
+	mutex_unlock(&subsys->lock);
+}
+
 void nvmet_subsys_put(struct nvmet_subsys *subsys)
 {
 	kref_put(&subsys->ref, nvmet_subsys_free);
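
Instead of deleting the controller synchronously from the keep-alive timer's work context, the timeout now reports a fatal error so teardown runs on the dedicated fatal_err_work path, and nvmet_ctrl_free() flushes/cancels those work items before the controller memory goes away. The shape of deferring teardown out of a timer callback, reduced to a sketch (the flag stands in for queue_work()):

    #include <stdbool.h>

    struct ctrl {
    	bool fatal_pending;	/* stands in for scheduling fatal_err_work */
    };

    /* Timer callback: never tear down inline; schedule the dedicated
     * error handler, as nvmet_ctrl_fatal_error() does. */
    static void keep_alive_expired(struct ctrl *c)
    {
    	c->fatal_pending = true;
    }
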
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 173e842f19c9..ba57f9852bde 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
 	struct fcnvme_ls_disconnect_acc *acc =
 			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
-	struct nvmet_fc_tgt_queue *queue;
+	struct nvmet_fc_tgt_queue *queue = NULL;
 	struct nvmet_fc_tgt_assoc *assoc;
 	int ret = 0;
 	bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 		assoc = nvmet_fc_find_target_assoc(tgtport,
 				be64_to_cpu(rqst->associd.association_id));
 		iod->assoc = assoc;
-		if (!assoc)
+		if (assoc) {
+			if (rqst->discon_cmd.scope ==
+					FCNVME_DISCONN_CONNECTION) {
+				queue = nvmet_fc_find_target_queue(tgtport,
+						be64_to_cpu(
+							rqst->discon_cmd.id));
+				if (!queue) {
+					nvmet_fc_tgt_a_put(assoc);
+					ret = VERR_NO_CONN;
+				}
+			}
+		} else
 			ret = VERR_NO_ASSOC;
 	}
 
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			FCNVME_LS_DISCONNECT);
 
 
-	if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
-		queue = nvmet_fc_find_target_queue(tgtport,
-				be64_to_cpu(rqst->discon_cmd.id));
-		if (queue) {
-			int qid = queue->qid;
+	/* are we to delete a Connection ID (queue) */
+	if (queue) {
+		int qid = queue->qid;
 
-			nvmet_fc_delete_target_queue(queue);
+		nvmet_fc_delete_target_queue(queue);
 
-			/* release the get taken by find_target_queue */
-			nvmet_fc_tgt_q_put(queue);
+		/* release the get taken by find_target_queue */
+		nvmet_fc_tgt_q_put(queue);
 
-			/* tear association down if io queue terminated */
-			if (!qid)
-				del_assoc = true;
-		}
+		/* tear association down if io queue terminated */
+		if (!qid)
+			del_assoc = true;
 	}
 
 	/* release get taken in nvmet_fc_find_target_assoc */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 23d5eb1c944f..cc7ad06b43a7 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
282struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, 282struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
283 enum nvme_subsys_type type); 283 enum nvme_subsys_type type);
284void nvmet_subsys_put(struct nvmet_subsys *subsys); 284void nvmet_subsys_put(struct nvmet_subsys *subsys);
285void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
285 286
286struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid); 287struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
287void nvmet_put_namespace(struct nvmet_ns *ns); 288void nvmet_put_namespace(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a78ac0..60990220bd83 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
438{ 438{
439 struct ib_recv_wr *bad_wr; 439 struct ib_recv_wr *bad_wr;
440 440
441 ib_dma_sync_single_for_device(ndev->device,
442 cmd->sge[0].addr, cmd->sge[0].length,
443 DMA_FROM_DEVICE);
444
441 if (ndev->srq) 445 if (ndev->srq)
442 return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); 446 return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
443 return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); 447 return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
538 first_wr = &rsp->send_wr; 542 first_wr = &rsp->send_wr;
539 543
540 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); 544 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
545
546 ib_dma_sync_single_for_device(rsp->queue->dev->device,
547 rsp->send_sge.addr, rsp->send_sge.length,
548 DMA_TO_DEVICE);
549
541 if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { 550 if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
542 pr_err("sending cmd response failed\n"); 551 pr_err("sending cmd response failed\n");
543 nvmet_rdma_release_rsp(rsp); 552 nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
698 cmd->n_rdma = 0; 707 cmd->n_rdma = 0;
699 cmd->req.port = queue->port; 708 cmd->req.port = queue->port;
700 709
710
711 ib_dma_sync_single_for_cpu(queue->dev->device,
712 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
713 DMA_FROM_DEVICE);
714 ib_dma_sync_single_for_cpu(queue->dev->device,
715 cmd->send_sge.addr, cmd->send_sge.length,
716 DMA_TO_DEVICE);
717
701 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, 718 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
702 &queue->nvme_sq, &nvmet_rdma_ops)) 719 &queue->nvme_sq, &nvmet_rdma_ops))
703 return; 720 return;
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index dd6d4ccb41e4..3858b87fd0bb 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
293 p->irq = PARPORT_IRQ_NONE; 293 p->irq = PARPORT_IRQ_NONE;
294 } 294 }
295 if (p->irq != PARPORT_IRQ_NONE) { 295 if (p->irq != PARPORT_IRQ_NONE) {
296 printk(", irq %d", p->irq); 296 pr_cont(", irq %d", p->irq);
297 297
298 if (p->dma == PARPORT_DMA_AUTO) { 298 if (p->dma == PARPORT_DMA_AUTO) {
299 p->dma = PARPORT_DMA_NONE; 299 p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
303 is mandatory (see above) */ 303 is mandatory (see above) */
304 p->dma = PARPORT_DMA_NONE; 304 p->dma = PARPORT_DMA_NONE;
305 305
306 printk(" ["); 306 pr_cont(" [");
307#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} 307#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
308 { 308 {
309 int f = 0; 309 int f = 0;
310 printmode(PCSPP); 310 printmode(PCSPP);
@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
315// printmode(DMA); 315// printmode(DMA);
316 } 316 }
317#undef printmode 317#undef printmode
318 printk("]\n"); 318 pr_cont("]\n");
319 319
320 if (p->irq != PARPORT_IRQ_NONE) { 320 if (p->irq != PARPORT_IRQ_NONE) {
321 if (request_irq (p->irq, parport_irq_handler, 321 if (request_irq (p->irq, parport_irq_handler,
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 17ac1dce3286..3dd8bcbb3011 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
532 link = kzalloc(sizeof(*link), GFP_KERNEL); 532 link = kzalloc(sizeof(*link), GFP_KERNEL);
533 if (!link) 533 if (!link)
534 return NULL; 534 return NULL;
535
535 INIT_LIST_HEAD(&link->sibling); 536 INIT_LIST_HEAD(&link->sibling);
536 INIT_LIST_HEAD(&link->children); 537 INIT_LIST_HEAD(&link->children);
537 INIT_LIST_HEAD(&link->link); 538 INIT_LIST_HEAD(&link->link);
538 link->pdev = pdev; 539 link->pdev = pdev;
539 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) { 540
541 /*
542 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
543 * hierarchies.
544 */
545 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
546 pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
547 link->root = link;
548 } else {
540 struct pcie_link_state *parent; 549 struct pcie_link_state *parent;
550
541 parent = pdev->bus->parent->self->link_state; 551 parent = pdev->bus->parent->self->link_state;
542 if (!parent) { 552 if (!parent) {
543 kfree(link); 553 kfree(link);
544 return NULL; 554 return NULL;
545 } 555 }
556
546 link->parent = parent; 557 link->parent = parent;
558 link->root = link->parent->root;
547 list_add(&link->link, &parent->children); 559 list_add(&link->link, &parent->children);
548 } 560 }
549 /* Setup a pointer to the root port link */
550 if (!link->parent)
551 link->root = link;
552 else
553 link->root = link->parent->root;
554 561
555 list_add(&link->sibling, &link_list); 562 list_add(&link->sibling, &link_list);
556 pdev->link_state = link; 563 pdev->link_state = link;
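
The comment in the hunk states the invariant: Root Ports and PCI/PCI-X-to-PCIe bridges are roots of PCIe hierarchies, so they point their root field at themselves, and every downstream link inherits the parent's root at allocation time. A self-contained C model of that propagation, with illustrative struct names rather than the kernel's:

#include <assert.h>
#include <stddef.h>

struct link_state {
    struct link_state *parent;
    struct link_state *root;
};

static void link_init(struct link_state *link, struct link_state *parent)
{
    link->parent = parent;
    /* a root points at itself; children inherit the parent's root */
    link->root = parent ? parent->root : link;
}

int main(void)
{
    struct link_state root, child, grandchild;

    link_init(&root, NULL);
    link_init(&child, &root);
    link_init(&grandchild, &child);

    assert(root.root == &root);
    assert(grandchild.root == &root); /* root propagates down the tree */
    return 0;
}
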
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 09172043d589..c617ec49e9ed 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
217 BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, 217 BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
218 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ 218 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
219 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ 219 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
220 BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */ 220 BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
221 BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, 221 BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
222 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ 222 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
223 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ 223 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 37300634b7d2..d94aef17348b 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
731 int reg) 731 int reg)
732{ 732{
733 struct byt_community *comm = byt_get_community(vg, offset); 733 struct byt_community *comm = byt_get_community(vg, offset);
734 u32 reg_offset = 0; 734 u32 reg_offset;
735 735
736 if (!comm) 736 if (!comm)
737 return NULL; 737 return NULL;
738 738
739 offset -= comm->pin_base; 739 offset -= comm->pin_base;
740 if (reg == BYT_INT_STAT_REG) 740 switch (reg) {
741 case BYT_INT_STAT_REG:
741 reg_offset = (offset / 32) * 4; 742 reg_offset = (offset / 32) * 4;
742 else 743 break;
744 case BYT_DEBOUNCE_REG:
745 reg_offset = 0;
746 break;
747 default:
743 reg_offset = comm->pad_map[offset] * 16; 748 reg_offset = comm->pad_map[offset] * 16;
749 break;
750 }
744 751
745 return comm->reg_base + reg_offset + reg; 752 return comm->reg_base + reg_offset + reg;
746} 753}
@@ -1092,6 +1099,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
1092 enum pin_config_param param = pinconf_to_config_param(*config); 1099 enum pin_config_param param = pinconf_to_config_param(*config);
1093 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); 1100 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
1094 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); 1101 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
1102 void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
1095 unsigned long flags; 1103 unsigned long flags;
1096 u32 conf, pull, val, debounce; 1104 u32 conf, pull, val, debounce;
1097 u16 arg = 0; 1105 u16 arg = 0;
@@ -1128,7 +1136,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
1128 return -EINVAL; 1136 return -EINVAL;
1129 1137
1130 raw_spin_lock_irqsave(&vg->lock, flags); 1138 raw_spin_lock_irqsave(&vg->lock, flags);
1131 debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG)); 1139 debounce = readl(db_reg);
1132 raw_spin_unlock_irqrestore(&vg->lock, flags); 1140 raw_spin_unlock_irqrestore(&vg->lock, flags);
1133 1141
1134 switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { 1142 switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1184,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1176 unsigned int param, arg; 1184 unsigned int param, arg;
1177 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); 1185 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
1178 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); 1186 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
1187 void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
1179 unsigned long flags; 1188 unsigned long flags;
1180 u32 conf, val, debounce; 1189 u32 conf, val, debounce;
1181 int i, ret = 0; 1190 int i, ret = 0;
@@ -1238,36 +1247,44 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1238 1247
1239 break; 1248 break;
1240 case PIN_CONFIG_INPUT_DEBOUNCE: 1249 case PIN_CONFIG_INPUT_DEBOUNCE:
1241 debounce = readl(byt_gpio_reg(vg, offset, 1250 debounce = readl(db_reg);
1242 BYT_DEBOUNCE_REG)); 1251 debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
1243 conf &= ~BYT_DEBOUNCE_PULSE_MASK; 1252
1253 if (arg)
1254 conf |= BYT_DEBOUNCE_EN;
1255 else
1256 conf &= ~BYT_DEBOUNCE_EN;
1244 1257
1245 switch (arg) { 1258 switch (arg) {
1246 case 375: 1259 case 375:
1247 conf |= BYT_DEBOUNCE_PULSE_375US; 1260 debounce |= BYT_DEBOUNCE_PULSE_375US;
1248 break; 1261 break;
1249 case 750: 1262 case 750:
1250 conf |= BYT_DEBOUNCE_PULSE_750US; 1263 debounce |= BYT_DEBOUNCE_PULSE_750US;
1251 break; 1264 break;
1252 case 1500: 1265 case 1500:
1253 conf |= BYT_DEBOUNCE_PULSE_1500US; 1266 debounce |= BYT_DEBOUNCE_PULSE_1500US;
1254 break; 1267 break;
1255 case 3000: 1268 case 3000:
1256 conf |= BYT_DEBOUNCE_PULSE_3MS; 1269 debounce |= BYT_DEBOUNCE_PULSE_3MS;
1257 break; 1270 break;
1258 case 6000: 1271 case 6000:
1259 conf |= BYT_DEBOUNCE_PULSE_6MS; 1272 debounce |= BYT_DEBOUNCE_PULSE_6MS;
1260 break; 1273 break;
1261 case 12000: 1274 case 12000:
1262 conf |= BYT_DEBOUNCE_PULSE_12MS; 1275 debounce |= BYT_DEBOUNCE_PULSE_12MS;
1263 break; 1276 break;
1264 case 24000: 1277 case 24000:
1265 conf |= BYT_DEBOUNCE_PULSE_24MS; 1278 debounce |= BYT_DEBOUNCE_PULSE_24MS;
1266 break; 1279 break;
1267 default: 1280 default:
1268 ret = -EINVAL; 1281 if (arg)
1282 ret = -EINVAL;
1283 break;
1269 } 1284 }
1270 1285
1286 if (!ret)
1287 writel(debounce, db_reg);
1271 break; 1288 break;
1272 default: 1289 default:
1273 ret = -ENOTSUPP; 1290 ret = -ENOTSUPP;
@@ -1606,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
1606 continue; 1623 continue;
1607 } 1624 }
1608 1625
1626 raw_spin_lock(&vg->lock);
1609 pending = readl(reg); 1627 pending = readl(reg);
1628 raw_spin_unlock(&vg->lock);
1610 for_each_set_bit(pin, &pending, 32) { 1629 for_each_set_bit(pin, &pending, 32) {
1611 virq = irq_find_mapping(vg->chip.irqdomain, base + pin); 1630 virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
1612 generic_handle_irq(virq); 1631 generic_handle_irq(virq);
@@ -1617,6 +1636,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
1617 1636
1618static void byt_gpio_irq_init_hw(struct byt_gpio *vg) 1637static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
1619{ 1638{
1639 struct gpio_chip *gc = &vg->chip;
1640 struct device *dev = &vg->pdev->dev;
1620 void __iomem *reg; 1641 void __iomem *reg;
1621 u32 base, value; 1642 u32 base, value;
1622 int i; 1643 int i;
@@ -1638,10 +1659,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
1638 } 1659 }
1639 1660
1640 value = readl(reg); 1661 value = readl(reg);
1641 if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && 1662 if (value & BYT_DIRECT_IRQ_EN) {
1642 !(value & BYT_DIRECT_IRQ_EN)) { 1663 clear_bit(i, gc->irq_valid_mask);
1664 dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
1665 } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
1643 byt_gpio_clear_triggering(vg, i); 1666 byt_gpio_clear_triggering(vg, i);
1644 dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); 1667 dev_dbg(dev, "disabling GPIO %d\n", i);
1645 } 1668 }
1646 } 1669 }
1647 1670
@@ -1680,6 +1703,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
1680 gc->can_sleep = false; 1703 gc->can_sleep = false;
1681 gc->parent = &vg->pdev->dev; 1704 gc->parent = &vg->pdev->dev;
1682 gc->ngpio = vg->soc_data->npins; 1705 gc->ngpio = vg->soc_data->npins;
1706 gc->irq_need_valid_mask = true;
1683 1707
1684#ifdef CONFIG_PM_SLEEP 1708#ifdef CONFIG_PM_SLEEP
1685 vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio, 1709 vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
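
The debounce fix is a classic read-modify-write repair: the pulse-width bits must be cleared and set in the value read back from the debounce register itself, not OR-ed into the unrelated conf register as before. A runnable sketch of the corrected sequence, with made-up mask and field values rather than the Baytrail hardware's:

#include <assert.h>
#include <stdint.h>

#define DEBOUNCE_PULSE_MASK  0x7u        /* illustrative values */
#define DEBOUNCE_PULSE_375US 0x1u
#define DEBOUNCE_EN          (1u << 3)

int main(void)
{
    /* pretend readl(db_reg) returned this */
    uint32_t debounce = DEBOUNCE_EN | 0x5u;

    /* clear the old pulse width, then set the new one */
    debounce &= ~DEBOUNCE_PULSE_MASK;
    debounce |= DEBOUNCE_PULSE_375US;

    assert((debounce & DEBOUNCE_PULSE_MASK) == DEBOUNCE_PULSE_375US);
    assert(debounce & DEBOUNCE_EN); /* unrelated bits survive the update */
    return 0;
}
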
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 59cb7a6fc5be..901b356b09d7 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -19,7 +19,7 @@
19 19
20#define BXT_PAD_OWN 0x020 20#define BXT_PAD_OWN 0x020
21#define BXT_HOSTSW_OWN 0x080 21#define BXT_HOSTSW_OWN 0x080
22#define BXT_PADCFGLOCK 0x090 22#define BXT_PADCFGLOCK 0x060
23#define BXT_GPI_IE 0x110 23#define BXT_GPI_IE 0x110
24 24
25#define BXT_COMMUNITY(s, e) \ 25#define BXT_COMMUNITY(s, e) \
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1e139672f1af..6df35dcb29ae 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
353 return 0; 353 return 0;
354} 354}
355 355
356static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
357{
358 u32 value;
359
360 value = readl(padcfg0);
361 if (input) {
362 value &= ~PADCFG0_GPIORXDIS;
363 value |= PADCFG0_GPIOTXDIS;
364 } else {
365 value &= ~PADCFG0_GPIOTXDIS;
366 value |= PADCFG0_GPIORXDIS;
367 }
368 writel(value, padcfg0);
369}
370
356static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, 371static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
357 struct pinctrl_gpio_range *range, 372 struct pinctrl_gpio_range *range,
358 unsigned pin) 373 unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
375 /* Disable SCI/SMI/NMI generation */ 390 /* Disable SCI/SMI/NMI generation */
376 value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); 391 value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
377 value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); 392 value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
378 /* Disable TX buffer and enable RX (this will be input) */
379 value &= ~PADCFG0_GPIORXDIS;
380 value |= PADCFG0_GPIOTXDIS;
381 writel(value, padcfg0); 393 writel(value, padcfg0);
382 394
395 /* Disable TX buffer and enable RX (this will be input) */
396 __intel_gpio_set_direction(padcfg0, true);
397
383 raw_spin_unlock_irqrestore(&pctrl->lock, flags); 398 raw_spin_unlock_irqrestore(&pctrl->lock, flags);
384 399
385 return 0; 400 return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
392 struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); 407 struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
393 void __iomem *padcfg0; 408 void __iomem *padcfg0;
394 unsigned long flags; 409 unsigned long flags;
395 u32 value;
396 410
397 raw_spin_lock_irqsave(&pctrl->lock, flags); 411 raw_spin_lock_irqsave(&pctrl->lock, flags);
398 412
399 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); 413 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
400 414 __intel_gpio_set_direction(padcfg0, input);
401 value = readl(padcfg0);
402 if (input)
403 value |= PADCFG0_GPIOTXDIS;
404 else
405 value &= ~PADCFG0_GPIOTXDIS;
406 writel(value, padcfg0);
407 415
408 raw_spin_unlock_irqrestore(&pctrl->lock, flags); 416 raw_spin_unlock_irqrestore(&pctrl->lock, flags);
409 417
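
The new __intel_gpio_set_direction() helper encodes one rule for both callers: for input, clear the RX-disable bit and set the TX-disable bit; for output, the reverse. The old open-coded path in intel_gpio_set_direction() only ever toggled the TX bit. A standalone model of the helper's bit logic, with assumed bit positions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define GPIORXDIS (1u << 9) /* assumed positions, for illustration only */
#define GPIOTXDIS (1u << 8)

static uint32_t set_direction(uint32_t value, bool input)
{
    if (input) {
        value &= ~GPIORXDIS; /* enable the receiver */
        value |= GPIOTXDIS;  /* disable the output driver */
    } else {
        value &= ~GPIOTXDIS;
        value |= GPIORXDIS;
    }
    return value;
}

int main(void)
{
    uint32_t v = set_direction(0, true);

    assert((v & GPIOTXDIS) && !(v & GPIORXDIS));
    v = set_direction(v, false);
    assert(!(v & GPIOTXDIS) && (v & GPIORXDIS));
    return 0;
}
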
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index b21896126f76..4d4ef42a39b5 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
794 unsigned int i; 794 unsigned int i;
795 int ret; 795 int ret;
796 796
797 if (!mrfld_buf_available(mp, pin))
798 return -ENOTSUPP;
799
797 for (i = 0; i < nconfigs; i++) { 800 for (i = 0; i < nconfigs; i++) {
798 switch (pinconf_to_config_param(configs[i])) { 801 switch (pinconf_to_config_param(configs[i])) {
799 case PIN_CONFIG_BIAS_DISABLE: 802 case PIN_CONFIG_BIAS_DISABLE:
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index c3928aa3fefa..e0bca4df2a2f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
253static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; 253static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
254static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; 254static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
255static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; 255static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
256static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; 256static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
257static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), 257static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
258 PIN(GPIOAO_5, 0) };
259static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; 258static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
260static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; 259static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
261 260
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
498 GPIO_GROUP(GPIOAO_13, 0), 497 GPIO_GROUP(GPIOAO_13, 0),
499 498
500 /* bank AO */ 499 /* bank AO */
501 GROUP(uart_tx_ao_b, 0, 26), 500 GROUP(uart_tx_ao_b, 0, 24),
502 GROUP(uart_rx_ao_b, 0, 25), 501 GROUP(uart_rx_ao_b, 0, 25),
503 GROUP(uart_tx_ao_a, 0, 12), 502 GROUP(uart_tx_ao_a, 0, 12),
504 GROUP(uart_rx_ao_a, 0, 11), 503 GROUP(uart_rx_ao_a, 0, 11),
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 25694f7094c7..b69743b07a1d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
214static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; 214static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
215static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; 215static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
216static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; 216static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
217static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; 217static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
218static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), 218static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
219 PIN(GPIOAO_5, 0) };
220static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; 219static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
221static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; 220static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
222 221
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
409 GPIO_GROUP(GPIOAO_9, 0), 408 GPIO_GROUP(GPIOAO_9, 0),
410 409
411 /* bank AO */ 410 /* bank AO */
412 GROUP(uart_tx_ao_b, 0, 26), 411 GROUP(uart_tx_ao_b, 0, 24),
413 GROUP(uart_rx_ao_b, 0, 25), 412 GROUP(uart_rx_ao_b, 0, 25),
414 GROUP(uart_tx_ao_a, 0, 12), 413 GROUP(uart_tx_ao_a, 0, 12),
415 GROUP(uart_rx_ao_a, 0, 11), 414 GROUP(uart_rx_ao_a, 0, 11),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index c9a146948192..537b52055756 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
202 i = 128; 202 i = 128;
203 pin_num = AMD_GPIO_PINS_BANK2 + i; 203 pin_num = AMD_GPIO_PINS_BANK2 + i;
204 break; 204 break;
205 default:
206 return;
205 } 207 }
206 208
207 for (; i < pin_num; i++) { 209 for (; i < pin_num; i++) {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0eb51e33cb1b..207a8de4e1ed 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
564 val = arg / 10 - 1; 564 val = arg / 10 - 1;
565 break; 565 break;
566 case PIN_CONFIG_BIAS_DISABLE: 566 case PIN_CONFIG_BIAS_DISABLE:
567 val = 0; 567 continue;
568 break;
569 case PIN_CONFIG_BIAS_PULL_UP: 568 case PIN_CONFIG_BIAS_PULL_UP:
570 if (arg == 0) 569 if (arg == 0)
571 return -EINVAL; 570 return -EINVAL;
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index aa8bd9794683..96686336e3a3 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
561 0, 0, 0, 0}; 561 0, 0, 0, 0};
562static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39, 562static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
563 41, 42, 45}; 563 41, 42, 45};
564static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; 564static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
565static const unsigned i2c0_pins[] = {63, 64}; 565static const unsigned i2c0_pins[] = {63, 64};
566static const int i2c0_muxvals[] = {0, 0}; 566static const int i2c0_muxvals[] = {0, 0};
567static const unsigned i2c1_pins[] = {65, 66}; 567static const unsigned i2c1_pins[] = {65, 66};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 410741acb3c9..f46ece2ce3c4 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
813 case 8: 813 case 8:
814 case 7: 814 case 7:
815 case 6: 815 case 6:
816 case 1:
816 ideapad_input_report(priv, vpc_bit); 817 ideapad_input_report(priv, vpc_bit);
817 break; 818 break;
818 case 5: 819 case 5:
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 1fc0de870ff8..361770568ad0 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
77 77
78 input_set_capability(input, EV_KEY, KEY_POWER); 78 input_set_capability(input, EV_KEY, KEY_POWER);
79 79
80 error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0, 80 error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
81 DRIVER_NAME, input); 81 DRIVER_NAME, input);
82 if (error) { 82 if (error) {
83 dev_err(&pdev->dev, "Unable to request irq %d for mfld power" 83 dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 97b4c3a219c0..25f15df5c2d7 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
326 return 0; 326 return 0;
327 327
328fail_platform_mux_register: 328fail_platform_mux_register:
329 for (i--; i > 0 ; i--) 329 while (--i >= 0)
330 platform_device_unregister(priv->pdev_mux[i]); 330 platform_device_unregister(priv->pdev_mux[i]);
331 platform_device_unregister(priv->pdev_i2c); 331 platform_device_unregister(priv->pdev_i2c);
332fail_alloc: 332fail_alloc:
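
The unwind fix matters because "for (i--; i > 0; i--)" never visits index 0, so the first mux device registered was leaked on every failure, while "while (--i >= 0)" unregisters everything that was registered before the failing index. A runnable comparison of the two loops:

#include <assert.h>

int main(void)
{
    int visited_old = 0, visited_new = 0;
    int i;

    i = 2;                  /* say registration failed at index 2 */
    for (i--; i > 0; i--)   /* old loop: skips index 0 */
        visited_old++;

    i = 2;
    while (--i >= 0)        /* fixed loop: visits 1, then 0 */
        visited_new++;

    assert(visited_old == 1);
    assert(visited_new == 2);
    return 0;
}
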
diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
index cbf4d83a7271..25b176996cb7 100644
--- a/drivers/platform/x86/surface3-wmi.c
+++ b/drivers/platform/x86/surface3-wmi.c
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
139 139
140static int s3_wmi_check_platform_device(struct device *dev, void *data) 140static int s3_wmi_check_platform_device(struct device *dev, void *data)
141{ 141{
142 struct acpi_device *adev, *ts_adev; 142 struct acpi_device *adev, *ts_adev = NULL;
143 acpi_handle handle; 143 acpi_handle handle;
144 acpi_status status; 144 acpi_status status;
145 145
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
244 return 0; 244 return 0;
245} 245}
246 246
247#ifdef CONFIG_PM 247static int __maybe_unused s3_wmi_resume(struct device *dev)
248static int s3_wmi_resume(struct device *dev)
249{ 248{
250 s3_wmi_send_lid_state(); 249 s3_wmi_send_lid_state();
251 return 0; 250 return 0;
252} 251}
253#endif
254static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume); 252static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
255 253
256static struct platform_driver s3_wmi_driver = { 254static struct platform_driver s3_wmi_driver = {
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index e6a512ebeae2..a3ade9e4ef47 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
272 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1, 272 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
273 BIT(3)), 273 BIT(3)),
274 AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100, 274 AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
275 AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)), 275 AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
276 AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100, 276 AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
277 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)), 277 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
278 AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100, 278 AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index a43b0e8a438d..988a7472c2ab 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -30,9 +30,6 @@
30#include <linux/of_gpio.h> 30#include <linux/of_gpio.h>
31#include <linux/regulator/of_regulator.h> 31#include <linux/regulator/of_regulator.h>
32#include <linux/regulator/machine.h> 32#include <linux/regulator/machine.h>
33#include <linux/acpi.h>
34#include <linux/property.h>
35#include <linux/gpio/consumer.h>
36 33
37struct fixed_voltage_data { 34struct fixed_voltage_data {
38 struct regulator_desc desc; 35 struct regulator_desc desc;
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev,
97 return config; 94 return config;
98} 95}
99 96
100/**
101 * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
102 * @dev: device requesting for fixed_voltage_config
103 * @desc: regulator description
104 *
105 * Populates fixed_voltage_config structure by extracting data through ACPI
106 * interface, returns a pointer to the populated structure of NULL if memory
107 * alloc fails.
108 */
109static struct fixed_voltage_config *
110acpi_get_fixed_voltage_config(struct device *dev,
111 const struct regulator_desc *desc)
112{
113 struct fixed_voltage_config *config;
114 const char *supply_name;
115 struct gpio_desc *gpiod;
116 int ret;
117
118 config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
119 if (!config)
120 return ERR_PTR(-ENOMEM);
121
122 ret = device_property_read_string(dev, "supply-name", &supply_name);
123 if (!ret)
124 config->supply_name = supply_name;
125
126 gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
127 if (IS_ERR(gpiod))
128 return ERR_PTR(-ENODEV);
129
130 config->gpio = desc_to_gpio(gpiod);
131 config->enable_high = device_property_read_bool(dev,
132 "enable-active-high");
133 gpiod_put(gpiod);
134
135 return config;
136}
137
138static struct regulator_ops fixed_voltage_ops = { 97static struct regulator_ops fixed_voltage_ops = {
139}; 98};
140 99
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
155 &drvdata->desc); 114 &drvdata->desc);
156 if (IS_ERR(config)) 115 if (IS_ERR(config))
157 return PTR_ERR(config); 116 return PTR_ERR(config);
158 } else if (ACPI_HANDLE(&pdev->dev)) {
159 config = acpi_get_fixed_voltage_config(&pdev->dev,
160 &drvdata->desc);
161 if (IS_ERR(config))
162 return PTR_ERR(config);
163 } else { 117 } else {
164 config = dev_get_platdata(&pdev->dev); 118 config = dev_get_platdata(&pdev->dev);
165 } 119 }
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 4864b9d742c0..716191046a70 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
452 vsel = 62; 452 vsel = 62;
453 else if ((min_uV > 1800000) && (min_uV <= 1900000)) 453 else if ((min_uV > 1800000) && (min_uV <= 1900000))
454 vsel = 61; 454 vsel = 61;
455 else if ((min_uV > 1350000) && (min_uV <= 1800000)) 455 else if ((min_uV > 1500000) && (min_uV <= 1800000))
456 vsel = 60; 456 vsel = 60;
457 else if ((min_uV > 1350000) && (min_uV <= 1500000)) 457 else if ((min_uV > 1350000) && (min_uV <= 1500000))
458 vsel = 59; 458 vsel = 59;
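
The one-constant change makes the ladder's ranges disjoint: before it, any request in the 1.35 V to 1.5 V band matched the "vsel = 60" branch first, and the dedicated "vsel = 59" branch below it was unreachable. A compact model of the corrected subset of the ladder:

#include <assert.h>

/* illustrative subset of the twl6030 vsel ladder after the fix */
static int map_voltage(int min_uV)
{
    if (min_uV > 1800000 && min_uV <= 1900000)
        return 61;
    else if (min_uV > 1500000 && min_uV <= 1800000)
        return 60;
    else if (min_uV > 1350000 && min_uV <= 1500000)
        return 59;
    return -1;
}

int main(void)
{
    assert(map_voltage(1400000) == 59); /* reachable again after the fix */
    assert(map_voltage(1600000) == 60);
    return 0;
}
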
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index c93c5a8fba32..5dc673dc9487 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
1551 will be called rtc-mpc5121. 1551 will be called rtc-mpc5121.
1552 1552
1553config RTC_DRV_JZ4740 1553config RTC_DRV_JZ4740
1554 bool "Ingenic JZ4740 SoC" 1554 tristate "Ingenic JZ4740 SoC"
1555 depends on MACH_INGENIC || COMPILE_TEST 1555 depends on MACH_INGENIC || COMPILE_TEST
1556 help 1556 help
1557 If you say yes here you get support for the Ingenic JZ47xx SoCs RTC 1557 If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
1558 controllers. 1558 controllers.
1559 1559
1560 This driver can also be built as a module. If so, the module
1561 will be called rtc-jz4740.
1562
1560config RTC_DRV_LPC24XX 1563config RTC_DRV_LPC24XX
1561 tristate "NXP RTC for LPC178x/18xx/408x/43xx" 1564 tristate "NXP RTC for LPC178x/18xx/408x/43xx"
1562 depends on ARCH_LPC18XX || COMPILE_TEST 1565 depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 72918c1ba092..64989afffa3d 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -17,6 +17,7 @@
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h>
20#include <linux/of_device.h> 21#include <linux/of_device.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <linux/reboot.h> 23#include <linux/reboot.h>
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
294 JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks); 295 JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
295 296
296 jz4740_rtc_poweroff(dev_for_power_off); 297 jz4740_rtc_poweroff(dev_for_power_off);
297 machine_halt(); 298 kernel_halt();
298} 299}
299 300
300static const struct of_device_id jz4740_rtc_of_match[] = { 301static const struct of_device_id jz4740_rtc_of_match[] = {
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
302 { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 }, 303 { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
303 {}, 304 {},
304}; 305};
306MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
305 307
306static int jz4740_rtc_probe(struct platform_device *pdev) 308static int jz4740_rtc_probe(struct platform_device *pdev)
307{ 309{
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
429 { "jz4780-rtc", ID_JZ4780 }, 431 { "jz4780-rtc", ID_JZ4780 },
430 {} 432 {}
431}; 433};
434MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
432 435
433static struct platform_driver jz4740_rtc_driver = { 436static struct platform_driver jz4740_rtc_driver = {
434 .probe = jz4740_rtc_probe, 437 .probe = jz4740_rtc_probe,
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
440 .id_table = jz4740_rtc_ids, 443 .id_table = jz4740_rtc_ids,
441}; 444};
442 445
443builtin_platform_driver(jz4740_rtc_driver); 446module_platform_driver(jz4740_rtc_driver);
447
448MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
449MODULE_LICENSE("GPL");
450MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC");
451MODULE_ALIAS("platform:jz4740-rtc");
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0b09638fa39b..1f5d92a25a49 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -836,6 +836,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
836 struct bio *bio = rq->bio; 836 struct bio *bio = rq->bio;
837 sector_t sector = blk_rq_pos(rq); 837 sector_t sector = blk_rq_pos(rq);
838 unsigned int nr_sectors = blk_rq_sectors(rq); 838 unsigned int nr_sectors = blk_rq_sectors(rq);
839 unsigned int nr_bytes = blk_rq_bytes(rq);
839 int ret; 840 int ret;
840 841
841 if (sdkp->device->no_write_same) 842 if (sdkp->device->no_write_same)
@@ -868,7 +869,21 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
868 869
869 cmd->transfersize = sdp->sector_size; 870 cmd->transfersize = sdp->sector_size;
870 cmd->allowed = SD_MAX_RETRIES; 871 cmd->allowed = SD_MAX_RETRIES;
871 return scsi_init_io(cmd); 872
873 /*
874 * For WRITE SAME the data transferred via the DATA OUT buffer is
875 * different from the amount of data actually written to the target.
876 *
877 * We set up __data_len to the amount of data transferred via the
878 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
879 * to transfer a single sector of data first, but then reset it to
880 * the amount of data to be written right after so that the I/O path
881 * knows how much to actually write.
882 */
883 rq->__data_len = sdp->sector_size;
884 ret = scsi_init_io(cmd);
885 rq->__data_len = nr_bytes;
886 return ret;
872} 887}
873 888
874static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd) 889static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
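
The block comment in the hunk describes the trick: temporarily shrink the request's byte count to one logical sector so the scatter/gather setup maps only the DATA OUT buffer, then restore the real count so completion accounting covers everything the target will write. A small userspace model of the save-shrink-restore sequence, with a stand-in struct for struct request:

#include <assert.h>
#include <stddef.h>

struct fake_rq {
    size_t data_len; /* stand-in for rq->__data_len */
};

static size_t mapped; /* bytes the fake S/G setup saw */

static int init_io(struct fake_rq *rq)
{
    mapped = rq->data_len; /* blk_rq_map_sg would map this much */
    return 0;
}

int main(void)
{
    struct fake_rq rq = { .data_len = 8 * 512 }; /* 8-sector WRITE SAME */
    size_t nr_bytes = rq.data_len;
    size_t sector_size = 512;
    int ret;

    rq.data_len = sector_size; /* map a single sector of payload */
    ret = init_io(&rq);
    rq.data_len = nr_bytes;    /* restore so completion sees the real size */

    assert(ret == 0 && mapped == 512 && rq.data_len == 4096);
    return 0;
}
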
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index ec91bd07f00a..c680d7641311 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
534{ 534{
535 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); 535 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
536 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); 536 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
537 unsigned long flags;
537 int req_size; 538 int req_size;
539 int ret;
538 540
539 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); 541 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
540 542
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
562 req_size = sizeof(cmd->req.cmd); 564 req_size = sizeof(cmd->req.cmd);
563 } 565 }
564 566
565 if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) 567 ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
568 if (ret == -EIO) {
569 cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
570 spin_lock_irqsave(&req_vq->vq_lock, flags);
571 virtscsi_complete_cmd(vscsi, cmd);
572 spin_unlock_irqrestore(&req_vq->vq_lock, flags);
573 } else if (ret != 0) {
566 return SCSI_MLQUEUE_HOST_BUSY; 574 return SCSI_MLQUEUE_HOST_BUSY;
575 }
567 return 0; 576 return 0;
568} 577}
569 578
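
Kick failures are now split by cause: -EIO means the target is gone, so the command is completed immediately with VIRTIO_SCSI_S_BAD_TARGET under the virtqueue lock, while any other nonzero return still asks the midlayer to requeue. A minimal model of that three-way dispatch:

#include <assert.h>
#include <errno.h>

enum { QUEUED, COMPLETED_BAD_TARGET, RETRY_HOST_BUSY };

static int queue_outcome(int kick_ret)
{
    if (kick_ret == -EIO)
        return COMPLETED_BAD_TARGET; /* complete with BAD_TARGET status */
    else if (kick_ret != 0)
        return RETRY_HOST_BUSY;      /* SCSI_MLQUEUE_HOST_BUSY */
    return QUEUED;
}

int main(void)
{
    assert(queue_outcome(-EIO) == COMPLETED_BAD_TARGET);
    assert(queue_outcome(-ENOSPC) == RETRY_HOST_BUSY);
    assert(queue_outcome(0) == QUEUED);
    return 0;
}
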
diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
index 113f3d6c4b3a..27f75b17679b 100644
--- a/drivers/staging/greybus/timesync_platform.c
+++ b/drivers/staging/greybus/timesync_platform.c
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
45 45
46int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata) 46int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
47{ 47{
48 if (!arche_platform_change_state_cb)
49 return 0;
50
48 return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC, 51 return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
49 pdata); 52 pdata);
50} 53}
51 54
52void gb_timesync_platform_unlock_bus(void) 55void gb_timesync_platform_unlock_bus(void)
53{ 56{
57 if (!arche_platform_change_state_cb)
58 return;
59
54 arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL); 60 arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
55} 61}
56 62
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index c4a508a124dc..541af5946203 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -59,6 +59,14 @@ static LIST_HEAD(thermal_hwmon_list);
59static DEFINE_MUTEX(thermal_hwmon_list_lock); 59static DEFINE_MUTEX(thermal_hwmon_list_lock);
60 60
61static ssize_t 61static ssize_t
62name_show(struct device *dev, struct device_attribute *attr, char *buf)
63{
64 struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
65 return sprintf(buf, "%s\n", hwmon->type);
66}
67static DEVICE_ATTR_RO(name);
68
69static ssize_t
62temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) 70temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
63{ 71{
64 int temperature; 72 int temperature;
@@ -157,12 +165,15 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
157 165
158 INIT_LIST_HEAD(&hwmon->tz_list); 166 INIT_LIST_HEAD(&hwmon->tz_list);
159 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH); 167 strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
160 hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type, 168 hwmon->device = hwmon_device_register(NULL);
161 hwmon, NULL, NULL);
162 if (IS_ERR(hwmon->device)) { 169 if (IS_ERR(hwmon->device)) {
163 result = PTR_ERR(hwmon->device); 170 result = PTR_ERR(hwmon->device);
164 goto free_mem; 171 goto free_mem;
165 } 172 }
173 dev_set_drvdata(hwmon->device, hwmon);
174 result = device_create_file(hwmon->device, &dev_attr_name);
175 if (result)
176 goto free_mem;
166 177
167 register_sys_interface: 178 register_sys_interface:
168 temp = kzalloc(sizeof(*temp), GFP_KERNEL); 179 temp = kzalloc(sizeof(*temp), GFP_KERNEL);
@@ -211,8 +222,10 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
211 free_temp_mem: 222 free_temp_mem:
212 kfree(temp); 223 kfree(temp);
213 unregister_name: 224 unregister_name:
214 if (new_hwmon_device) 225 if (new_hwmon_device) {
226 device_remove_file(hwmon->device, &dev_attr_name);
215 hwmon_device_unregister(hwmon->device); 227 hwmon_device_unregister(hwmon->device);
228 }
216 free_mem: 229 free_mem:
217 if (new_hwmon_device) 230 if (new_hwmon_device)
218 kfree(hwmon); 231 kfree(hwmon);
@@ -254,6 +267,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
254 list_del(&hwmon->node); 267 list_del(&hwmon->node);
255 mutex_unlock(&thermal_hwmon_list_lock); 268 mutex_unlock(&thermal_hwmon_list_lock);
256 269
270 device_remove_file(hwmon->device, &dev_attr_name);
257 hwmon_device_unregister(hwmon->device); 271 hwmon_device_unregister(hwmon->device);
258 kfree(hwmon); 272 kfree(hwmon);
259} 273}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d2e50a27140c..24f9f98968a5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
37 /* CBM - Flash disk */ 37 /* CBM - Flash disk */
38 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, 38 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
39 39
40 /* WORLDE easy key (easykey.25) MIDI controller */
41 { USB_DEVICE(0x0218, 0x0401), .driver_info =
42 USB_QUIRK_CONFIG_INTF_STRINGS },
43
40 /* HP 5300/5370C scanner */ 44 /* HP 5300/5370C scanner */
41 { USB_DEVICE(0x03f0, 0x0701), .driver_info = 45 { USB_DEVICE(0x03f0, 0x0701), .driver_info =
42 USB_QUIRK_STRING_FETCH_255 }, 46 USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5490fc51638e..fd80c1b9c823 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2269 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) 2269 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2270 return -EINVAL; 2270 return -EINVAL;
2271 length = le32_to_cpu(d->dwSize); 2271 length = le32_to_cpu(d->dwSize);
2272 if (len < length)
2273 return -EINVAL;
2272 type = le32_to_cpu(d->dwPropertyDataType); 2274 type = le32_to_cpu(d->dwPropertyDataType);
2273 if (type < USB_EXT_PROP_UNICODE || 2275 if (type < USB_EXT_PROP_UNICODE ||
2274 type > USB_EXT_PROP_UNICODE_MULTI) { 2276 type > USB_EXT_PROP_UNICODE_MULTI) {
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2277 return -EINVAL; 2279 return -EINVAL;
2278 } 2280 }
2279 pnl = le16_to_cpu(d->wPropertyNameLength); 2281 pnl = le16_to_cpu(d->wPropertyNameLength);
2282 if (length < 14 + pnl) {
2283 pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
2284 length, pnl, type);
2285 return -EINVAL;
2286 }
2280 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl)); 2287 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
2281 if (length != 14 + pnl + pdl) { 2288 if (length != 14 + pnl + pdl) {
2282 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n", 2289 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
2363 } 2370 }
2364 } 2371 }
2365 if (flags & (1 << i)) { 2372 if (flags & (1 << i)) {
2373 if (len < 4) {
2374 goto error;
2375 }
2366 os_descs_count = get_unaligned_le32(data); 2376 os_descs_count = get_unaligned_le32(data);
2367 data += 4; 2377 data += 4;
2368 len -= 4; 2378 len -= 4;
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
2435 2445
2436 ENTER(); 2446 ENTER();
2437 2447
2438 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || 2448 if (unlikely(len < 16 ||
2449 get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
2439 get_unaligned_le32(data + 4) != len)) 2450 get_unaligned_le32(data + 4) != len))
2440 goto error; 2451 goto error;
2441 str_count = get_unaligned_le32(data + 8); 2452 str_count = get_unaligned_le32(data + 8);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index fca288bbc800..772f15821242 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
594 | MUSB_PORT_STAT_RESUME; 594 | MUSB_PORT_STAT_RESUME;
595 musb->rh_timer = jiffies 595 musb->rh_timer = jiffies
596 + msecs_to_jiffies(USB_RESUME_TIMEOUT); 596 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
597 musb->need_finish_resume = 1;
598
599 musb->xceiv->otg->state = OTG_STATE_A_HOST; 597 musb->xceiv->otg->state = OTG_STATE_A_HOST;
600 musb->is_active = 1; 598 musb->is_active = 1;
601 musb_host_resume_root_hub(musb); 599 musb_host_resume_root_hub(musb);
600 schedule_delayed_work(&musb->finish_resume_work,
601 msecs_to_jiffies(USB_RESUME_TIMEOUT));
602 break; 602 break;
603 case OTG_STATE_B_WAIT_ACON: 603 case OTG_STATE_B_WAIT_ACON:
604 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL; 604 musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
1925static void musb_irq_work(struct work_struct *data) 1925static void musb_irq_work(struct work_struct *data)
1926{ 1926{
1927 struct musb *musb = container_of(data, struct musb, irq_work.work); 1927 struct musb *musb = container_of(data, struct musb, irq_work.work);
1928 int error;
1929
1930 error = pm_runtime_get_sync(musb->controller);
1931 if (error < 0) {
1932 dev_err(musb->controller, "Could not enable: %i\n", error);
1933
1934 return;
1935 }
1928 1936
1929 musb_pm_runtime_check_session(musb); 1937 musb_pm_runtime_check_session(musb);
1930 1938
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data)
1932 musb->xceiv_old_state = musb->xceiv->otg->state; 1940 musb->xceiv_old_state = musb->xceiv->otg->state;
1933 sysfs_notify(&musb->controller->kobj, NULL, "mode"); 1941 sysfs_notify(&musb->controller->kobj, NULL, "mode");
1934 } 1942 }
1943
1944 pm_runtime_mark_last_busy(musb->controller);
1945 pm_runtime_put_autosuspend(musb->controller);
1935} 1946}
1936 1947
1937static void musb_recover_from_babble(struct musb *musb) 1948static void musb_recover_from_babble(struct musb *musb)
@@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev)
2710 mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV; 2721 mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
2711 if ((devctl & mask) != (musb->context.devctl & mask)) 2722 if ((devctl & mask) != (musb->context.devctl & mask))
2712 musb->port1_status = 0; 2723 musb->port1_status = 0;
2713 if (musb->need_finish_resume) {
2714 musb->need_finish_resume = 0;
2715 schedule_delayed_work(&musb->finish_resume_work,
2716 msecs_to_jiffies(USB_RESUME_TIMEOUT));
2717 }
2718 2724
2719 /* 2725 /*
2720 * The USB HUB code expects the device to be in RPM_ACTIVE once it came 2726 * The USB HUB code expects the device to be in RPM_ACTIVE once it came
@@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev)
2766 2772
2767 musb_restore_context(musb); 2773 musb_restore_context(musb);
2768 2774
2769 if (musb->need_finish_resume) {
2770 musb->need_finish_resume = 0;
2771 schedule_delayed_work(&musb->finish_resume_work,
2772 msecs_to_jiffies(USB_RESUME_TIMEOUT));
2773 }
2774
2775 spin_lock_irqsave(&musb->lock, flags); 2775 spin_lock_irqsave(&musb->lock, flags);
2776 error = musb_run_resume_work(musb); 2776 error = musb_run_resume_work(musb);
2777 if (error) 2777 if (error)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index ade902ea1221..ce5a18c98c6d 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -410,7 +410,6 @@ struct musb {
410 410
411 /* is_suspended means USB B_PERIPHERAL suspend */ 411 /* is_suspended means USB B_PERIPHERAL suspend */
412 unsigned is_suspended:1; 412 unsigned is_suspended:1;
413 unsigned need_finish_resume :1;
414 413
415 /* may_wakeup means remote wakeup is enabled */ 414 /* may_wakeup means remote wakeup is enabled */
416 unsigned may_wakeup:1; 415 unsigned may_wakeup:1;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7ce31a4c7e7f..42cc72e54c05 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
2007 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, 2007 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
2008 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, 2008 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
2009 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, 2009 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
2010 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
2010 { } /* Terminating entry */ 2011 { } /* Terminating entry */
2011}; 2012};
2012MODULE_DEVICE_TABLE(usb, option_ids); 2013MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 46fca6b75846..1db4b61bdf7b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
49 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, 49 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
51 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, 51 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
52 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
52 { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, 53 { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
53 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, 54 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
54 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) }, 55 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index e3b7af8adfb7..09d9be88209e 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
27#define ATEN_VENDOR_ID 0x0557 27#define ATEN_VENDOR_ID 0x0557
28#define ATEN_VENDOR_ID2 0x0547 28#define ATEN_VENDOR_ID2 0x0547
29#define ATEN_PRODUCT_ID 0x2008 29#define ATEN_PRODUCT_ID 0x2008
30#define ATEN_PRODUCT_ID2 0x2118
30 31
31#define IODATA_VENDOR_ID 0x04bb 32#define IODATA_VENDOR_ID 0x04bb
32#define IODATA_PRODUCT_ID 0x0a03 33#define IODATA_PRODUCT_ID 0x0a03
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 1bc6089b9008..696458db7e3c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
124 {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ 124 {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
125 {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ 125 {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
126 {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ 126 {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
127 {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
127 {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */ 128 {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
128 {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ 129 {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
129 {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */ 130 {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c8823578a1b2..7690e5bf3cf1 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
1123 mutex_lock(&container->lock); 1123 mutex_lock(&container->lock);
1124 1124
1125 ret = tce_iommu_create_default_window(container); 1125 ret = tce_iommu_create_default_window(container);
1126 if (ret) 1126 if (!ret)
1127 return ret; 1127 ret = tce_iommu_create_window(container,
1128 1128 create.page_shift,
1129 ret = tce_iommu_create_window(container, create.page_shift, 1129 create.window_size, create.levels,
1130 create.window_size, create.levels, 1130 &create.start_addr);
1131 &create.start_addr);
1132 1131
1133 mutex_unlock(&container->lock); 1132 mutex_unlock(&container->lock);
1134 1133
@@ -1270,6 +1269,10 @@ static int tce_iommu_attach_group(void *iommu_data,
1270 /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", 1269 /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
1271 iommu_group_id(iommu_group), iommu_group); */ 1270 iommu_group_id(iommu_group), iommu_group); */
1272 table_group = iommu_group_get_iommudata(iommu_group); 1271 table_group = iommu_group_get_iommudata(iommu_group);
1272 if (!table_group) {
1273 ret = -ENODEV;
1274 goto unlock_exit;
1275 }
1273 1276
1274 if (tce_groups_attached(container) && (!table_group->ops || 1277 if (tce_groups_attached(container) && (!table_group->ops ||
1275 !table_group->ops->take_ownership || 1278 !table_group->ops->take_ownership ||
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d6432603880c..8f99fe08de02 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
130 130
131static void vhost_init_is_le(struct vhost_virtqueue *vq) 131static void vhost_init_is_le(struct vhost_virtqueue *vq)
132{ 132{
133 if (vhost_has_feature(vq, VIRTIO_F_VERSION_1)) 133 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
134 vq->is_le = true; 134 || virtio_legacy_is_little_endian();
135} 135}
136#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */ 136#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
137 137
138static void vhost_reset_is_le(struct vhost_virtqueue *vq) 138static void vhost_reset_is_le(struct vhost_virtqueue *vq)
139{ 139{
140 vq->is_le = virtio_legacy_is_little_endian(); 140 vhost_init_is_le(vq);
141} 141}
142 142
143struct vhost_flush_struct { 143struct vhost_flush_struct {
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
1714 int r; 1714 int r;
1715 bool is_le = vq->is_le; 1715 bool is_le = vq->is_le;
1716 1716
1717 if (!vq->private_data) { 1717 if (!vq->private_data)
1718 vhost_reset_is_le(vq);
1719 return 0; 1718 return 0;
1720 }
1721 1719
1722 vhost_init_is_le(vq); 1720 vhost_init_is_le(vq);
1723 1721
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bbbf588540ed..ce5e63d2c66a 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)

 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+	struct vhost_virtqueue *vq;
 	size_t i;
 	int ret;

@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
 		goto err;

 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];

 		mutex_lock(&vq->mutex);

 		if (!vhost_vq_access_ok(vq)) {
 			ret = -EFAULT;
-			mutex_unlock(&vq->mutex);
 			goto err_vq;
 		}

 		if (!vq->private_data) {
 			vq->private_data = vsock;
-			vhost_vq_init_access(vq);
+			ret = vhost_vq_init_access(vq);
+			if (ret)
+				goto err_vq;
 		}

 		mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
 	return 0;

 err_vq:
+	vq->private_data = NULL;
+	mutex_unlock(&vq->mutex);
+
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];

 		mutex_lock(&vq->mutex);
 		vq->private_data = NULL;
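The reworked error path must leave no queue locked and no queue pointing at a half-started owner. A minimal pthread sketch of the same unwind shape (illustrative only; 'fail_at' is an artificial knob, not a driver parameter):

#include <pthread.h>
#include <stdio.h>

#define NQ 2

static pthread_mutex_t lock[NQ];
static void *priv[NQ];

static int start(void *owner, int fail_at)
{
	int i;

	for (i = 0; i < NQ; i++) {
		pthread_mutex_lock(&lock[i]);
		priv[i] = owner;
		if (i == fail_at)
			goto err_vq;	/* models vhost_vq_init_access() failing */
		pthread_mutex_unlock(&lock[i]);
	}
	return 0;

err_vq:
	priv[i] = NULL;			/* undo the queue we just touched... */
	pthread_mutex_unlock(&lock[i]);	/* ...and drop its lock first */
	for (i = 0; i < NQ; i++) {	/* then reset every queue */
		pthread_mutex_lock(&lock[i]);
		priv[i] = NULL;
		pthread_mutex_unlock(&lock[i]);
	}
	return -1;
}

int main(void)
{
	int i;

	for (i = 0; i < NQ; i++)
		pthread_mutex_init(&lock[i], NULL);
	printf("start: %d (queues reset, no lock held)\n", start(lock, 1));
	return 0;
}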
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index f89245b8ba8e..68a113594808 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)

 int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 {
-	int tooff = 0, fromoff = 0;
-	int size;
+	unsigned int tooff = 0, fromoff = 0;
+	size_t size;

 	if (to->start > from->start)
 		fromoff = to->start - from->start;
 	else
 		tooff = from->start - to->start;
-	size = to->len - tooff;
-	if (size > (int) (from->len - fromoff))
-		size = from->len - fromoff;
-	if (size <= 0)
+	if (fromoff >= from->len || tooff >= to->len)
+		return -EINVAL;
+
+	size = min_t(size_t, to->len - tooff, from->len - fromoff);
+	if (size == 0)
 		return -EINVAL;
 	size *= sizeof(u16);

@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)

 int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
 {
-	int tooff = 0, fromoff = 0;
-	int size;
+	unsigned int tooff = 0, fromoff = 0;
+	size_t size;

 	if (to->start > from->start)
 		fromoff = to->start - from->start;
 	else
 		tooff = from->start - to->start;
-	size = to->len - tooff;
-	if (size > (int) (from->len - fromoff))
-		size = from->len - fromoff;
-	if (size <= 0)
+	if (fromoff >= from->len || tooff >= to->len)
+		return -EINVAL;
+
+	size = min_t(size_t, to->len - tooff, from->len - fromoff);
+	if (size == 0)
 		return -EINVAL;
 	size *= sizeof(u16);

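The fix replaces signed arithmetic that could go negative (and then slip past an '(int)' cast) with unsigned offsets, an explicit range check, and a clamped length. A standalone sketch of the fixed bounds logic (hypothetical helper name; u16 entries as in struct fb_cmap):

#include <stddef.h>
#include <stdio.h>

static int copy_len(unsigned int to_start, unsigned int to_len,
		    unsigned int from_start, unsigned int from_len,
		    size_t *out)
{
	unsigned int tooff = 0, fromoff = 0;
	size_t size;

	if (to_start > from_start)
		fromoff = to_start - from_start;
	else
		tooff = from_start - to_start;
	/* Reject disjoint ranges before doing any subtraction. */
	if (fromoff >= from_len || tooff >= to_len)
		return -1;
	size = to_len - tooff;
	if (size > from_len - fromoff)
		size = from_len - fromoff;
	if (size == 0)
		return -1;
	*out = size * sizeof(unsigned short);	/* u16 entries */
	return 0;
}

int main(void)
{
	size_t n;

	/* Overlapping maps: copies exactly the intersection. */
	if (copy_len(0, 256, 16, 256, &n) == 0)
		printf("copy %zu bytes\n", n);
	/* Disjoint maps: the old signed math went negative here; now
	 * the request is rejected up front. */
	printf("disjoint -> %d\n", copy_len(0, 16, 64, 16, &n));
	return 0;
}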
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index d47a2fcef818..c71fde5fe835 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -59,6 +59,7 @@
 #define pr_fmt(fmt) "virtio-mmio: " fmt

 #include <linux/acpi.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	struct virtio_mmio_device *vm_dev;
 	struct resource *mem;
 	unsigned long magic;
+	int rc;

 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	}
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

-	if (vm_dev->version == 1)
+	if (vm_dev->version == 1) {
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+		/*
+		 * In the legacy case, ensure our coherently-allocated virtio
+		 * ring will be at an address expressible as a 32-bit PFN.
+		 */
+		if (!rc)
+			dma_set_coherent_mask(&pdev->dev,
+					      DMA_BIT_MASK(32 + PAGE_SHIFT));
+	} else {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	}
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");
+
 	platform_set_drvdata(pdev, vm_dev);

 	return register_virtio_device(&vm_dev->vdev);
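The probe now negotiates the widest usable DMA mask: 64-bit first (with a narrower coherent mask for legacy rings so PFNs fit in 32 bits), then 32-bit, and only warns but continues if both fail. A toy model of that fallback chain ('hw_limit' stands in for what the platform's dma_set_mask* calls would accept; it is not a kernel API):

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_limit = (1ull << 32) - 1;	/* pretend: 32-bit DMA only */

static int set_mask(uint64_t mask)
{
	return mask <= hw_limit ? 0 : -5 /* -EIO */;
}

int main(void)
{
	int rc;

	rc = set_mask(~0ull);			/* prefer 64-bit DMA... */
	if (rc)
		rc = set_mask((1ull << 32) - 1);/* ...fall back to 32-bit... */
	if (rc)					/* ...and only warn if even that fails */
		fprintf(stderr, "no usable DMA mask, continuing anyway\n");
	else
		printf("DMA mask negotiated\n");
	return 0;
}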
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f905d6eeb048..f8afc6dcc29f 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;

+	dev_addr = xen_phys_to_bus(map);
 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
 					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
-	dev_addr = xen_phys_to_bus(map);

 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 			sg_dma_len(sgl) = 0;
 			return 0;
 		}
+		dev_addr = xen_phys_to_bus(map);
 		xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
 						dev_addr,
 						map & ~PAGE_MASK,
 						sg->length,
 						dir,
 						attrs);
-		sg->dma_address = xen_phys_to_bus(map);
+		sg->dma_address = dev_addr;
 	} else {
 		/* we are not interested in the dma_addr returned by
 		 * xen_dma_map_page, only in the potential cache flushes executed
diff --git a/fs/Kconfig b/fs/Kconfig
index c2a377cdda2b..83eab52fb3f6 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -38,6 +38,7 @@ config FS_DAX
 	bool "Direct Access (DAX) support"
 	depends on MMU
 	depends on !(ARM || MIPS || SPARC)
+	select FS_IOMAP
 	help
 	  Direct Access (DAX) can be used on memory-backed block devices.
 	  If the block device supports DAX and the filesystem supports DAX,
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 5db5d1340d69..3c47614a4b32 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -331,7 +331,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct blk_plug plug;
 	struct blkdev_dio *dio;
 	struct bio *bio;
-	bool is_read = (iov_iter_rw(iter) == READ);
+	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
 	int ret;
@@ -344,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	bio_get(bio); /* extra ref for the completion handler */

 	dio = container_of(bio, struct blkdev_dio, bio);
-	dio->is_sync = is_sync_kiocb(iocb);
+	dio->is_sync = is_sync = is_sync_kiocb(iocb);
 	if (dio->is_sync)
 		dio->waiter = current;
 	else
@@ -398,7 +398,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	}
 	blk_finish_plug(&plug);

-	if (!dio->is_sync)
+	if (!is_sync)
 		return -EIOCBQUEUED;

 	for (;;) {
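For async I/O the completion handler may free the dio as soon as the last bio completes, so the flag is snapshotted into a local up front and never re-read through the pointer after submission. A user-space sketch of that use-after-free trap and the fix (simplified types, not the block-layer structures):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dio {
	bool is_sync;
};

static int submit(struct dio *dio)
{
	bool is_sync = dio->is_sync;	/* local copy taken before submission */

	/* For async I/O the completion path may free 'dio' the moment the
	 * last bio finishes; simulate that here. */
	if (!dio->is_sync)
		free(dio);

	if (!is_sync)			/* safe: reads the local, not *dio */
		return -529;		/* models -EIOCBQUEUED */
	free(dio);
	return 0;
}

int main(void)
{
	struct dio *dio = malloc(sizeof(*dio));

	if (!dio)
		return 1;
	dio->is_sync = false;
	printf("async submit -> %d\n", submit(dio));
	return 0;
}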
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4e024260ad71..1e861a063721 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3835,10 +3835,7 @@ cache_acl:
 		break;
 	case S_IFDIR:
 		inode->i_fop = &btrfs_dir_file_operations;
-		if (root == fs_info->tree_root)
-			inode->i_op = &btrfs_dir_ro_inode_operations;
-		else
-			inode->i_op = &btrfs_dir_inode_operations;
+		inode->i_op = &btrfs_dir_inode_operations;
 		break;
 	case S_IFLNK:
 		inode->i_op = &btrfs_symlink_inode_operations;
@@ -4505,8 +4502,19 @@ search_again:
 	if (found_type > min_type) {
 		del_item = 1;
 	} else {
-		if (item_end < new_size)
+		if (item_end < new_size) {
+			/*
+			 * With NO_HOLES mode, for the following mapping
+			 *
+			 * [0-4k][hole][8k-12k]
+			 *
+			 * if we truncate isize down to 6k, isize ends up
+			 * at 8k.
+			 */
+			if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+				last_size = new_size;
 			break;
+		}
 		if (found_key.offset >= new_size)
 			del_item = 1;
 		else
@@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s,

 	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
 	inode->i_op = &btrfs_dir_ro_inode_operations;
+	inode->i_opflags &= ~IOP_XATTR;
 	inode->i_fop = &simple_dir_operations;
 	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
 	inode->i_mtime = current_time(inode);
@@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
 	struct extent_map *em = NULL;
 	int ret;

-	down_read(&BTRFS_I(inode)->dio_sem);
 	if (type != BTRFS_ORDERED_NOCOW) {
 		em = create_pinned_em(inode, start, len, orig_start,
 				block_start, block_len, orig_block_len,
@@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
 		em = ERR_PTR(ret);
 	}
 out:
-	up_read(&BTRFS_I(inode)->dio_sem);

 	return em;
 }
@@ -8692,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 		dio_data.unsubmitted_oe_range_start = (u64)offset;
 		dio_data.unsubmitted_oe_range_end = (u64)offset;
 		current->journal_info = &dio_data;
+		down_read(&BTRFS_I(inode)->dio_sem);
 	} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
 			    &BTRFS_I(inode)->runtime_flags)) {
 		inode_dio_end(inode);
@@ -8704,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 			iter, btrfs_get_blocks_direct, NULL,
 			btrfs_submit_direct, flags);
 	if (iov_iter_rw(iter) == WRITE) {
+		up_read(&BTRFS_I(inode)->dio_sem);
 		current->journal_info = NULL;
 		if (ret < 0 && ret != -EIOCBQUEUED) {
 			if (dio_data.reserve)
@@ -9212,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode)
 			break;
 		}

+		btrfs_block_rsv_release(fs_info, rsv, -1);
 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
 					      rsv, min_size, 0);
 		BUG_ON(ret);	/* shouldn't happen */
@@ -10579,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = {
 static const struct inode_operations btrfs_dir_ro_inode_operations = {
 	.lookup		= btrfs_lookup,
 	.permission	= btrfs_permission,
-	.get_acl	= btrfs_get_acl,
-	.set_acl	= btrfs_set_acl,
 	.update_time	= btrfs_update_time,
 };

diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 8f6a2a5863b9..a27fc8791551 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
 		rc = -ENOMEM;
 		goto error_exit;
 	}
+	spin_lock_init(&cifsFile->file_info_lock);
 	file->private_data = cifsFile;
 	cifsFile->tlink = cifs_get_tlink(tlink);
 	tcon = tlink_tcon(tlink);
diff --git a/fs/dax.c b/fs/dax.c
index ddcddfeaa03b..c45598b912e1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -990,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(__dax_zero_page_range);

-#ifdef CONFIG_FS_IOMAP
 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 {
 	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -1032,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		struct blk_dax_ctl dax = { 0 };
 		ssize_t map_len;

+		if (fatal_signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
 		dax.sector = dax_iomap_sector(iomap, pos);
 		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
 		map_len = dax_map_atomic(iomap->bdev, &dax);
@@ -1428,4 +1432,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
 #endif	/* CONFIG_FS_DAX_PMD */
-#endif	/* CONFIG_FS_IOMAP */
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index 36bea5adcaba..c634874e12d9 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -1,6 +1,5 @@
 config EXT2_FS
 	tristate "Second extended fs support"
-	select FS_IOMAP if FS_DAX
 	help
 	  Ext2 is a standard Linux file system for hard disks.

diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 7b90691e98c4..e38039fd96ff 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,7 +37,6 @@ config EXT4_FS
 	select CRC16
 	select CRYPTO
 	select CRYPTO_CRC32C
-	select FS_IOMAP if FS_DAX
 	help
 	  This is the next generation of the ext3 filesystem.

diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 4304072161aa..40d61077bead 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 		hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
 			if (invalidate)
 				set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+			clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
 		}
 	} else {
@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 	wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
 			 TASK_UNINTERRUPTIBLE);

+	/* Make sure any pending writes are cancelled. */
+	if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+		fscache_invalidate_writes(cookie);
+
 	/* Reset the cookie state if it wasn't relinquished */
 	if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
 		atomic_inc(&cookie->n_active);
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
index 9b28649df3a1..a8aa00be4444 100644
--- a/fs/fscache/netfs.c
+++ b/fs/fscache/netfs.c
@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
 	cookie->flags		= 1 << FSCACHE_COOKIE_ENABLED;

 	spin_lock_init(&cookie->lock);
+	spin_lock_init(&cookie->stores_lock);
 	INIT_HLIST_HEAD(&cookie->backing_objects);

 	/* check the netfs type is not already present */
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9e792e30f4db..7a182c87f378 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);

 #define __STATE_NAME(n) fscache_osm_##n
 #define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
 static WORK_STATE(KILL_OBJECT,		"KILL", fscache_kill_object);
 static WORK_STATE(KILL_DEPENDENTS,	"KDEP", fscache_kill_dependents);
 static WORK_STATE(DROP_OBJECT,		"DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD,		"DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD,		"DEAD", fscache_object_dead);

 static WAIT_STATE(WAIT_FOR_INIT,	"?INI",
 		  TRANSIT_TO(INIT_OBJECT,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@ execute_work_state:
 	event = -1;
 	if (new_state == NO_TRANSIT) {
 		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
+			_leave(" [dead]");
+			return;
+		}
 		fscache_enqueue_object(object);
 		event_mask = object->oob_event_mask;
 		goto unmask_events;
@@ -239,7 +244,7 @@ execute_work_state:
 	object->state = state = new_state;

 	if (state->work) {
-		if (unlikely(state->work == ((void *)2UL))) {
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
 			_leave(" [dead]");
 			return;
 		}
@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
 	fscache_mark_object_dead(object);
 	object->oob_event_mask = 0;

+	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+		/* Reject any new read/write ops and abort any that are pending. */
+		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+		fscache_cancel_all_ops(object);
+	}
+
 	if (list_empty(&object->dependents) &&
 	    object->n_ops == 0 &&
 	    object->n_children == 0)
@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
 	}
 }
 EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead.  We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+						       int event)
+{
+	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+			      &object->flags))
+		return NO_TRANSIT;
+
+	WARN(true, "FS-Cache object redispatched after death");
+	return NO_TRANSIT;
+}
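Replacing the magic (void *)2UL sentinel with a real fscache_object_dead() handler lets the terminal state be compared as a state and absorb a stray redispatch instead of crashing through a bogus function pointer. A compact user-space model of such a terminal state (illustrative types, not the FS-Cache API):

#include <stdbool.h>
#include <stdio.h>

struct state;
typedef const struct state *(*work_fn)(int event);

struct state {
	const char *name;
	work_fn work;
};

static const struct state *object_dead(int event);

/* A real handler instead of a magic pointer value: the dispatcher can
 * compare against the state itself and safely run it more than once. */
static const struct state DEAD = { "DEAD", object_dead };

static bool ran_after_dead;

static const struct state *object_dead(int event)
{
	(void)event;
	if (!ran_after_dead) {
		ran_after_dead = true;
		return NULL;	/* models NO_TRANSIT */
	}
	fprintf(stderr, "object redispatched after death\n");
	return NULL;
}

static void dispatch(const struct state *s, int event)
{
	if (!s->work(event))
		printf("%s: no transition\n", s->name);
}

int main(void)
{
	dispatch(&DEAD, 0);	/* first stray wakeup: tolerated */
	dispatch(&DEAD, 0);	/* second one: warns */
	return 0;
}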
diff --git a/fs/iomap.c b/fs/iomap.c
index 354a123f170e..a51cb4c07d4d 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,

 	BUG_ON(pos + len > iomap->offset + iomap->length);

+	if (fatal_signal_pending(current))
+		return -EINTR;
+
 	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
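The dax and iomap hunks apply the same pattern: a long, chunked write loop checks for a fatal signal on each iteration and bails out with -EINTR instead of running to completion. A user-space analogue using SIGINT (the kernel checks fatal_signal_pending() instead; chunk work is elided):

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_sigint;

static void on_int(int sig)
{
	(void)sig;
	got_sigint = 1;
}

static int copy_chunks(int nchunks)
{
	int i;

	for (i = 0; i < nchunks; i++) {
		if (got_sigint)
			return -4;	/* models -EINTR */
		/* ... copy one page-sized chunk here ... */
	}
	return 0;
}

int main(void)
{
	signal(SIGINT, on_int);
	printf("copy -> %d\n", copy_chunks(1 << 20));
	return 0;
}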
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ecc151697fd4..0a0eaecf9676 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2700,7 +2700,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
 		sattr->ia_valid |= ATTR_MTIME;

 	/* Except for MODE, it seems harmless to set it twice. */
-	if ((attrset[1] & FATTR4_WORD1_MODE))
+	if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
+	    attrset[1] & FATTR4_WORD1_MODE)
 		sattr->ia_valid &= ~ATTR_MODE;

 	if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
@@ -8490,6 +8491,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
 		goto out;
 	}

+	nfs4_sequence_free_slot(&lgp->res.seq_res);
 	err = nfs4_handle_exception(server, nfs4err, exception);
 	if (!status) {
 		if (exception->retry)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 90e6193ce6be..daeb94e3acd4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1091,6 +1091,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
 		case -NFS4ERR_BADXDR:
 		case -NFS4ERR_RESOURCE:
 		case -NFS4ERR_NOFILEHANDLE:
+		case -NFS4ERR_MOVED:
 			/* Non-seqid mutating errors */
 			return;
 	};
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 59554f3adf29..dd042498ce7c 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1200,10 +1200,10 @@ _pnfs_return_layout(struct inode *ino)

 	send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
 	spin_unlock(&ino->i_lock);
-	pnfs_free_lseg_list(&tmp_list);
 	if (send)
 		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
 out_put_layout_hdr:
+	pnfs_free_lseg_list(&tmp_list);
 	pnfs_put_layout_hdr(lo);
 out:
 	dprintk("<-- %s status: %d\n", __func__, status);
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 596205d939a1..1fc07a9c70e9 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
 	struct nfs4_layout_stateid *ls;
 	struct nfs4_stid *stp;

-	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
+					nfsd4_free_layout_stateid);
 	if (!stp)
 		return NULL;
-	stp->sc_free = nfsd4_free_layout_stateid;
+
 	get_nfs4_file(fp);
 	stp->sc_file = fp;

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 4b4beaaa4eaa..a0dee8ae9f97 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -633,8 +633,8 @@ out:
 	return co;
 }

-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-					 struct kmem_cache *slab)
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+				  void (*sc_free)(struct nfs4_stid *))
 {
 	struct nfs4_stid *stid;
 	int new_id;
@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
 	idr_preload_end();
 	if (new_id < 0)
 		goto out_free;
+
+	stid->sc_free = sc_free;
 	stid->sc_client = cl;
 	stid->sc_stateid.si_opaque.so_id = new_id;
 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
@@ -675,15 +677,12 @@ out_free:
 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
 {
 	struct nfs4_stid *stid;
-	struct nfs4_ol_stateid *stp;

-	stid = nfs4_alloc_stid(clp, stateid_slab);
+	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
 	if (!stid)
 		return NULL;

-	stp = openlockstateid(stid);
-	stp->st_stid.sc_free = nfs4_free_ol_stateid;
-	return stp;
+	return openlockstateid(stid);
 }

 static void nfs4_free_deleg(struct nfs4_stid *stid)
@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
 		goto out_dec;
 	if (delegation_blocked(&current_fh->fh_handle))
 		goto out_dec;
-	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
+	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
 	if (dp == NULL)
 		goto out_dec;

-	dp->dl_stid.sc_free = nfs4_free_deleg;
 	/*
 	 * delegation seqid's are never incremented.  The 4.1 special
 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
 	get_nfs4_file(fp);
 	stp->st_stid.sc_file = fp;
-	stp->st_stid.sc_free = nfs4_free_lock_stateid;
 	stp->st_access_bmap = 0;
 	stp->st_deny_bmap = open_stp->st_deny_bmap;
 	stp->st_openstp = open_stp;
@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
 	lst = find_lock_stateid(lo, fi);
 	if (lst == NULL) {
 		spin_unlock(&clp->cl_lock);
-		ns = nfs4_alloc_stid(clp, stateid_slab);
+		ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
 		if (ns == NULL)
 			return NULL;

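Passing the destructor into nfs4_alloc_stid() removes the window in which a stateid exists without a valid sc_free, which every caller previously had to close by hand. A minimal sketch of the constructor-argument pattern (simplified types, not the nfsd structures):

#include <stdio.h>
#include <stdlib.h>

struct stid {
	void (*sc_free)(struct stid *);
	int id;
};

static void free_ol(struct stid *s)
{
	printf("freeing open/lock stateid %d\n", s->id);
	free(s);
}

static struct stid *alloc_stid(int id, void (*sc_free)(struct stid *))
{
	struct stid *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->sc_free = sc_free;	/* set before the object is ever visible */
	s->id = id;
	return s;
}

int main(void)
{
	struct stid *s = alloc_stid(42, free_ol);

	if (s)
		s->sc_free(s);
	return 0;
}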
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index c9399366f9df..4516e8b7d776 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
 		     stateid_t *stateid, unsigned char typemask,
 		     struct nfs4_stid **s, struct nfsd_net *nn);
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-		struct kmem_cache *slab);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+				  void (*sc_free)(struct nfs4_stid *));
 void nfs4_unhash_stid(struct nfs4_stid *s);
 void nfs4_put_stid(struct nfs4_stid *s);
 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 26c6fdb4bf67..ca13236dbb1f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -332,37 +332,6 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
 	}
 }

-static __be32
-nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
-		struct iattr *iap)
-{
-	struct inode *inode = d_inode(fhp->fh_dentry);
-	int host_err;
-
-	if (iap->ia_size < inode->i_size) {
-		__be32 err;
-
-		err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
-				NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
-		if (err)
-			return err;
-	}
-
-	host_err = get_write_access(inode);
-	if (host_err)
-		goto out_nfserrno;
-
-	host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
-	if (host_err)
-		goto out_put_write_access;
-	return 0;
-
-out_put_write_access:
-	put_write_access(inode);
-out_nfserrno:
-	return nfserrno(host_err);
-}
-
 /*
  * Set various file attributes. After this call fhp needs an fh_put.
  */
@@ -377,7 +346,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 	__be32		err;
 	int		host_err;
 	bool		get_write_count;
-	int		size_change = 0;

 	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
 		accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
@@ -390,11 +358,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 	/* Get inode */
 	err = fh_verify(rqstp, fhp, ftype, accmode);
 	if (err)
-		goto out;
+		return err;
 	if (get_write_count) {
 		host_err = fh_want_write(fhp);
 		if (host_err)
-			return nfserrno(host_err);
+			goto out_host_err;
 	}

 	dentry = fhp->fh_dentry;
@@ -405,50 +373,59 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
 		iap->ia_valid &= ~ATTR_MODE;

 	if (!iap->ia_valid)
-		goto out;
+		return 0;

 	nfsd_sanitize_attrs(inode, iap);

+	if (check_guard && guardtime != inode->i_ctime.tv_sec)
+		return nfserr_notsync;
+
 	/*
 	 * The size case is special, it changes the file in addition to the
-	 * attributes.
+	 * attributes, and file systems don't expect it to be mixed with
+	 * "random" attribute changes.  We thus split out the size change
+	 * into a separate call for vfs_truncate, and do the rest as a
+	 * separate setattr call.
 	 */
 	if (iap->ia_valid & ATTR_SIZE) {
-		err = nfsd_get_write_access(rqstp, fhp, iap);
-		if (err)
-			goto out;
-		size_change = 1;
+		struct path path = {
+			.mnt	= fhp->fh_export->ex_path.mnt,
+			.dentry	= dentry,
+		};
+		bool implicit_mtime = false;

 		/*
-		 * RFC5661, Section 18.30.4:
-		 * Changing the size of a file with SETATTR indirectly
-		 * changes the time_modify and change attributes.
-		 *
-		 * (and similar for the older RFCs)
+		 * vfs_truncate implicitly updates the mtime IFF the file
+		 * size actually changes.  Avoid the additional setattr call
+		 * below if the only other attribute that the client sends
+		 * is the mtime.
 		 */
-		if (iap->ia_size != i_size_read(inode))
-			iap->ia_valid |= ATTR_MTIME;
-	}
+		if (iap->ia_size != i_size_read(inode) &&
+		    ((iap->ia_valid & ~(ATTR_SIZE | ATTR_MTIME)) == 0))
+			implicit_mtime = true;

-	iap->ia_valid |= ATTR_CTIME;
+		host_err = vfs_truncate(&path, iap->ia_size);
+		if (host_err)
+			goto out_host_err;

-	if (check_guard && guardtime != inode->i_ctime.tv_sec) {
-		err = nfserr_notsync;
-		goto out_put_write_access;
+		iap->ia_valid &= ~ATTR_SIZE;
+		if (implicit_mtime)
+			iap->ia_valid &= ~ATTR_MTIME;
+		if (!iap->ia_valid)
+			goto done;
 	}

+	iap->ia_valid |= ATTR_CTIME;
+
 	fh_lock(fhp);
 	host_err = notify_change(dentry, iap, NULL);
 	fh_unlock(fhp);
-	err = nfserrno(host_err);
+	if (host_err)
+		goto out_host_err;

-out_put_write_access:
-	if (size_change)
-		put_write_access(inode);
-	if (!err)
-		err = nfserrno(commit_metadata(fhp));
-out:
-	return err;
+done:
+	host_err = commit_metadata(fhp);
+out_host_err:
+	return nfserrno(host_err);
 }

 #if defined(CONFIG_NFSD_V4)
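The rewritten nfsd_setattr() splits a size change into a dedicated vfs_truncate() call and applies the remaining attributes in a separate setattr step. A rough user-space analogue of that split, under the assumption that truncate-then-chmod reasonably models the two phases:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *path = "demo.txt";
	int fd = open(path, O_CREAT | O_WRONLY, 0644);

	if (fd < 0)
		return 1;
	close(fd);

	if (truncate(path, 4096))	/* the split-out "ATTR_SIZE" step */
		perror("truncate");
	if (chmod(path, 0600))		/* the remaining attribute change */
		perror("chmod");

	unlink(path);
	return 0;
}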
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8e7e61b28f31..87c9a9aacda3 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
 			iter.tgid += 1, iter = next_tgid(ns, iter)) {
 		char name[PROC_NUMBUF];
 		int len;
+
+		cond_resched();
 		if (!has_pid_permissions(ns, iter.task, 2))
 			continue;

diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38dfafa..0186fe6d39f3 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"

 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
-	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+	u64 id = 0;
+
+	/* When calling huge_encode_dev():
+	 * use sb->s_bdev->bd_dev when
+	 *   - CONFIG_ROMFS_ON_BLOCK is defined;
+	 * use sb->s_dev when
+	 *   - CONFIG_ROMFS_ON_BLOCK is undefined and
+	 *   - CONFIG_ROMFS_ON_MTD is defined;
+	 * leave id as 0 when
+	 *   - CONFIG_ROMFS_ON_BLOCK is undefined and
+	 *   - CONFIG_ROMFS_ON_MTD is undefined.
+	 */
+	if (sb->s_bdev)
+		id = huge_encode_dev(sb->s_bdev->bd_dev);
+	else if (sb->s_dev)
+		id = huge_encode_dev(sb->s_dev);

 	buf->f_type = ROMFS_MAGIC;
 	buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_flags |= MS_RDONLY | MS_NOATIME;
 	sb->s_op = &romfs_super_ops;

+#ifdef CONFIG_ROMFS_ON_MTD
+	/* Use the same dev ID as the underlying mtdblock device */
+	if (sb->s_mtd)
+		sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
 	/* read the image superblock and check it */
 	rsb = kmalloc(512, GFP_KERNEL);
 	if (!rsb)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index d96e2f30084b..43953e03c356 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
 	struct uffd_msg msg;
 	wait_queue_t wq;
 	struct userfaultfd_ctx *ctx;
+	bool waken;
 };

 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
 	if (len && (start > uwq->msg.arg.pagefault.address ||
 		    start + len <= uwq->msg.arg.pagefault.address))
 		goto out;
+	WRITE_ONCE(uwq->waken, true);
+	/*
+	 * The implicit smp_mb__before_spinlock in try_to_wake_up()
+	 * renders uwq->waken visible to other CPUs before the task is
+	 * woken.
+	 */
 	ret = wake_up_state(wq->private, mode);
 	if (ret)
 		/*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	struct userfaultfd_wait_queue uwq;
 	int ret;
 	bool must_wait, return_to_userland;
+	long blocking_state;

 	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	uwq.wq.private = current;
 	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
 	uwq.ctx = ctx;
+	uwq.waken = false;

 	return_to_userland =
 		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
 		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+			 TASK_KILLABLE;

 	spin_lock(&ctx->fault_pending_wqh.lock);
 	/*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * following the spin_unlock to happen before the list_add in
 	 * __add_wait_queue.
 	 */
-	set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-			  TASK_KILLABLE);
+	set_current_state(blocking_state);
 	spin_unlock(&ctx->fault_pending_wqh.lock);

 	must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 		wake_up_poll(&ctx->fd_wqh, POLLIN);
 		schedule();
 		ret |= VM_FAULT_MAJOR;
+
+		/*
+		 * False wakeups can originate even from rwsem before
+		 * up_read() however userfaults will wait either for a
+		 * targeted wakeup on the specific uwq waitqueue from
+		 * wake_userfault() or for signals or for uffd
+		 * release.
+		 */
+		while (!READ_ONCE(uwq.waken)) {
+			/*
+			 * This needs the full smp_store_mb()
+			 * guarantee as the state write must be
+			 * visible to other CPUs before reading
+			 * uwq.waken from other CPUs.
+			 */
+			set_current_state(blocking_state);
+			if (READ_ONCE(uwq.waken) ||
+			    READ_ONCE(ctx->released) ||
+			    (return_to_userland ? signal_pending(current) :
+			     fatal_signal_pending(current)))
+				break;
+			schedule();
+		}
 	}

 	__set_current_state(TASK_RUNNING);
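The new loop tolerates spurious wakeups: it re-arms the task state, re-checks its own waken flag plus the release/signal conditions, and only then sleeps again. A pthread sketch of the flag-then-wake ordering and the re-check loop (user-space stand-ins for wake_up_state() and schedule()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool waken;
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;

static void *waker(void *arg)
{
	(void)arg;
	usleep(1000);
	pthread_mutex_lock(&m);
	atomic_store(&waken, true);	/* flag first... */
	pthread_cond_broadcast(&c);	/* ...then wake, like wake_up_state() */
	pthread_mutex_unlock(&m);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waker, NULL);
	pthread_mutex_lock(&m);
	while (!atomic_load(&waken))	/* spurious wakeups loop back here */
		pthread_cond_wait(&c, &m);
	pthread_mutex_unlock(&m);
	pthread_join(t, NULL);
	puts("woken by targeted wakeup");
	return 0;
}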
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index d346d42c54d1..33db69be4832 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -39,6 +39,7 @@
 #include "xfs_rmap_btree.h"
 #include "xfs_btree.h"
 #include "xfs_refcount_btree.h"
+#include "xfs_ialloc_btree.h"

 /*
  * Per-AG Block Reservations
@@ -200,22 +201,30 @@ __xfs_ag_resv_init(
 	struct xfs_mount		*mp = pag->pag_mount;
 	struct xfs_ag_resv		*resv;
 	int				error;
+	xfs_extlen_t			reserved;

-	resv = xfs_perag_resv(pag, type);
 	if (used > ask)
 		ask = used;
-	resv->ar_asked = ask;
-	resv->ar_reserved = resv->ar_orig_reserved = ask - used;
-	mp->m_ag_max_usable -= ask;
+	reserved = ask - used;

-	trace_xfs_ag_resv_init(pag, type, ask);
-
-	error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
-	if (error)
+	error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+	if (error) {
 		trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
 				error, _RET_IP_);
+		xfs_warn(mp,
+"Per-AG reservation for AG %u failed.  Filesystem may run out of space.",
+				pag->pag_agno);
+		return error;
+	}

-	return error;
+	mp->m_ag_max_usable -= ask;
+
+	resv = xfs_perag_resv(pag, type);
+	resv->ar_asked = ask;
+	resv->ar_reserved = resv->ar_orig_reserved = reserved;
+
+	trace_xfs_ag_resv_init(pag, type, ask);
+	return 0;
 }

 /* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@ int
 xfs_ag_resv_init(
 	struct xfs_perag		*pag)
 {
+	struct xfs_mount		*mp = pag->pag_mount;
+	xfs_agnumber_t			agno = pag->pag_agno;
 	xfs_extlen_t			ask;
 	xfs_extlen_t			used;
 	int				error = 0;
@@ -231,23 +242,45 @@ xfs_ag_resv_init(
 	if (pag->pag_meta_resv.ar_asked == 0) {
 		ask = used = 0;

-		error = xfs_refcountbt_calc_reserves(pag->pag_mount,
-				pag->pag_agno, &ask, &used);
+		error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
 		if (error)
 			goto out;

-		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
-				ask, used);
+		error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
 		if (error)
 			goto out;
+
+		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+				ask, used);
+		if (error) {
+			/*
+			 * Because we didn't have per-AG reservations when the
+			 * finobt feature was added we might not be able to
+			 * reserve all needed blocks.  Warn and fall back to
+			 * the old and potentially buggy code in that case,
+			 * but ensure we do have the reservation for the
+			 * refcountbt.
+			 */
+			ask = used = 0;
+
+			mp->m_inotbt_nores = true;
+
+			error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+					&used);
+			if (error)
+				goto out;
+
+			error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+					ask, used);
+			if (error)
+				goto out;
+		}
 	}

 	/* Create the AGFL metadata reservation */
 	if (pag->pag_agfl_resv.ar_asked == 0) {
 		ask = used = 0;

-		error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
-				&ask, &used);
+		error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
 		if (error)
 			goto out;

@@ -256,9 +289,16 @@
 			goto out;
 	}

+#ifdef DEBUG
+	/* need to read in the AGF for the ASSERT below to work */
+	error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+	if (error)
+		return error;
+
 	ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
 	       xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
 	       pag->pagf_freeblks + pag->pagf_flcount);
+#endif
 out:
 	return error;
 }
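__xfs_ag_resv_init() now attempts to take the blocks first and only commits the per-AG bookkeeping once xfs_mod_fdblocks() has succeeded, so a failed reservation leaves no stale accounting behind. A small model of that reserve-then-commit ordering (illustrative numbers and types, not the XFS structures):

#include <stdio.h>

static long fdblocks = 1000;	/* pretend free-block counter */

static int mod_fdblocks(long delta)
{
	if (fdblocks + delta < 0)
		return -28;	/* models -ENOSPC */
	fdblocks += delta;
	return 0;
}

struct resv {
	long ar_asked;
	long ar_reserved;
};

static int resv_init(struct resv *r, long ask, long used)
{
	long reserved;
	int error;

	if (used > ask)
		ask = used;
	reserved = ask - used;

	error = mod_fdblocks(-reserved);	/* take the blocks first */
	if (error) {
		fprintf(stderr, "reservation failed, nothing committed\n");
		return error;
	}

	r->ar_asked = ask;		/* bookkeeping only on success */
	r->ar_reserved = reserved;
	return 0;
}

int main(void)
{
	struct resv r = { 0, 0 };

	printf("small ask -> %d\n", resv_init(&r, 100, 10));
	printf("huge ask  -> %d\n", resv_init(&r, 1 << 20, 0));
	printf("state: asked=%ld reserved=%ld fdblocks=%ld\n",
	       r.ar_asked, r.ar_reserved, fdblocks);
	return 0;
}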
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index af1ecb19121e..6622d46ddec3 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -131,9 +131,6 @@ xfs_attr_get(
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return -EIO;

-	if (!xfs_inode_hasattr(ip))
-		return -ENOATTR;
-
 	error = xfs_attr_args_init(&args, ip, name, flags);
 	if (error)
 		return error;
@@ -392,9 +389,6 @@ xfs_attr_remove(
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;

-	if (!xfs_inode_hasattr(dp))
-		return -ENOATTR;
-
 	error = xfs_attr_args_init(&args, dp, name, flags);
 	if (error)
 		return error;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 44773c9eb957..bfc00de5c6f1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3629,7 +3629,7 @@ xfs_bmap_btalloc(
 		align = xfs_get_cowextsz_hint(ap->ip);
 	else if (xfs_alloc_is_userdata(ap->datatype))
 		align = xfs_get_extsz_hint(ap->ip);
-	if (unlikely(align)) {
+	if (align) {
 		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
 						align, 0, ap->eof, 0, ap->conv,
 						&ap->offset, &ap->length);
@@ -3701,7 +3701,7 @@ xfs_bmap_btalloc(
 		args.minlen = ap->minlen;
 	}
 	/* apply extent size hints if obtained earlier */
-	if (unlikely(align)) {
+	if (align) {
 		args.prod = align;
 		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
 			args.mod = (xfs_extlen_t)(args.prod - args.mod);
@@ -4514,8 +4514,6 @@ xfs_bmapi_write(
 	int			n;		/* current extent index */
 	xfs_fileoff_t		obno;		/* old block number (offset) */
 	int			whichfork;	/* data or attr fork */
-	char			inhole;		/* current location is hole in file */
-	char			wasdelay;	/* old extent was delayed */

 #ifdef DEBUG
 	xfs_fileoff_t		orig_bno;	/* original block number value */
@@ -4603,22 +4601,44 @@ xfs_bmapi_write(
 	bma.firstblock = firstblock;

 	while (bno < end && n < *nmap) {
-		inhole = eof || bma.got.br_startoff > bno;
-		wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
+		bool			need_alloc = false, wasdelay = false;

-		/*
-		 * Make sure we only reflink into a hole.
-		 */
-		if (flags & XFS_BMAPI_REMAP)
-			ASSERT(inhole);
-		if (flags & XFS_BMAPI_COWFORK)
-			ASSERT(!inhole);
+		/* in hole or beyond EOF? */
+		if (eof || bma.got.br_startoff > bno) {
+			if (flags & XFS_BMAPI_DELALLOC) {
+				/*
+				 * For the COW fork we can reasonably get a
+				 * request for converting an extent that races
+				 * with other threads already having converted
+				 * part of it, as the conversion of COW to
+				 * regular blocks is not protected by the
+				 * IOLOCK.
+				 */
+				ASSERT(flags & XFS_BMAPI_COWFORK);
+				if (!(flags & XFS_BMAPI_COWFORK)) {
+					error = -EIO;
+					goto error0;
+				}
+
+				if (eof || bno >= end)
+					break;
+			} else {
+				need_alloc = true;
+			}
+		} else {
+			/*
+			 * Make sure we only reflink into a hole.
+			 */
+			ASSERT(!(flags & XFS_BMAPI_REMAP));
+			if (isnullstartblock(bma.got.br_startblock))
+				wasdelay = true;
+		}

 		/*
 		 * First, deal with the hole before the allocated space
 		 * that we found, if any.
 		 */
-		if (inhole || wasdelay) {
+		if (need_alloc || wasdelay) {
 			bma.eof = eof;
 			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
 			bma.wasdel = wasdelay;
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index cecd094404cc..cdef87db5262 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -110,6 +110,9 @@ struct xfs_extent_free_item
110/* Map something in the CoW fork. */ 110/* Map something in the CoW fork. */
111#define XFS_BMAPI_COWFORK 0x200 111#define XFS_BMAPI_COWFORK 0x200
112 112
113/* Only convert delalloc space, don't allocate entirely new extents */
114#define XFS_BMAPI_DELALLOC 0x400
115
113#define XFS_BMAPI_FLAGS \ 116#define XFS_BMAPI_FLAGS \
114 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 117 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
115 { XFS_BMAPI_METADATA, "METADATA" }, \ 118 { XFS_BMAPI_METADATA, "METADATA" }, \
@@ -120,7 +123,8 @@ struct xfs_extent_free_item
120 { XFS_BMAPI_CONVERT, "CONVERT" }, \ 123 { XFS_BMAPI_CONVERT, "CONVERT" }, \
121 { XFS_BMAPI_ZERO, "ZERO" }, \ 124 { XFS_BMAPI_ZERO, "ZERO" }, \
122 { XFS_BMAPI_REMAP, "REMAP" }, \ 125 { XFS_BMAPI_REMAP, "REMAP" }, \
123 { XFS_BMAPI_COWFORK, "COWFORK" } 126 { XFS_BMAPI_COWFORK, "COWFORK" }, \
127 { XFS_BMAPI_DELALLOC, "DELALLOC" }
124 128
125 129
126static inline int xfs_bmapi_aflag(int w) 130static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 0fd086d03d41..7c471881c9a6 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -82,11 +82,12 @@ xfs_finobt_set_root(
82} 82}
83 83
84STATIC int 84STATIC int
85xfs_inobt_alloc_block( 85__xfs_inobt_alloc_block(
86 struct xfs_btree_cur *cur, 86 struct xfs_btree_cur *cur,
87 union xfs_btree_ptr *start, 87 union xfs_btree_ptr *start,
88 union xfs_btree_ptr *new, 88 union xfs_btree_ptr *new,
89 int *stat) 89 int *stat,
90 enum xfs_ag_resv_type resv)
90{ 91{
91 xfs_alloc_arg_t args; /* block allocation args */ 92 xfs_alloc_arg_t args; /* block allocation args */
92 int error; /* error return value */ 93 int error; /* error return value */
@@ -103,6 +104,7 @@ xfs_inobt_alloc_block(
103 args.maxlen = 1; 104 args.maxlen = 1;
104 args.prod = 1; 105 args.prod = 1;
105 args.type = XFS_ALLOCTYPE_NEAR_BNO; 106 args.type = XFS_ALLOCTYPE_NEAR_BNO;
107 args.resv = resv;
106 108
107 error = xfs_alloc_vextent(&args); 109 error = xfs_alloc_vextent(&args);
108 if (error) { 110 if (error) {
@@ -123,6 +125,27 @@ xfs_inobt_alloc_block(
123} 125}
124 126
125STATIC int 127STATIC int
128xfs_inobt_alloc_block(
129 struct xfs_btree_cur *cur,
130 union xfs_btree_ptr *start,
131 union xfs_btree_ptr *new,
132 int *stat)
133{
134 return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
135}
136
137STATIC int
138xfs_finobt_alloc_block(
139 struct xfs_btree_cur *cur,
140 union xfs_btree_ptr *start,
141 union xfs_btree_ptr *new,
142 int *stat)
143{
144 return __xfs_inobt_alloc_block(cur, start, new, stat,
145 XFS_AG_RESV_METADATA);
146}
147
148STATIC int
126xfs_inobt_free_block( 149xfs_inobt_free_block(
127 struct xfs_btree_cur *cur, 150 struct xfs_btree_cur *cur,
128 struct xfs_buf *bp) 151 struct xfs_buf *bp)
@@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
328 351
329 .dup_cursor = xfs_inobt_dup_cursor, 352 .dup_cursor = xfs_inobt_dup_cursor,
330 .set_root = xfs_finobt_set_root, 353 .set_root = xfs_finobt_set_root,
331 .alloc_block = xfs_inobt_alloc_block, 354 .alloc_block = xfs_finobt_alloc_block,
332 .free_block = xfs_inobt_free_block, 355 .free_block = xfs_inobt_free_block,
333 .get_minrecs = xfs_inobt_get_minrecs, 356 .get_minrecs = xfs_inobt_get_minrecs,
334 .get_maxrecs = xfs_inobt_get_maxrecs, 357 .get_maxrecs = xfs_inobt_get_maxrecs,
@@ -480,3 +503,64 @@ xfs_inobt_rec_check_count(
480 return 0; 503 return 0;
481} 504}
482#endif /* DEBUG */ 505#endif /* DEBUG */
506
507static xfs_extlen_t
508xfs_inobt_max_size(
509 struct xfs_mount *mp)
510{
511 /* Bail out if we're uninitialized, which can happen in mkfs. */
512 if (mp->m_inobt_mxr[0] == 0)
513 return 0;
514
515 return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
516 (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
517 XFS_INODES_PER_CHUNK);
518}
519
520static int
521xfs_inobt_count_blocks(
522 struct xfs_mount *mp,
523 xfs_agnumber_t agno,
524 xfs_btnum_t btnum,
525 xfs_extlen_t *tree_blocks)
526{
527 struct xfs_buf *agbp;
528 struct xfs_btree_cur *cur;
529 int error;
530
531 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
532 if (error)
533 return error;
534
535 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
536 error = xfs_btree_count_blocks(cur, tree_blocks);
537 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
538 xfs_buf_relse(agbp);
539
540 return error;
541}
542
543/*
544 * Figure out how many blocks to reserve and how many are used by this btree.
545 */
546int
547xfs_finobt_calc_reserves(
548 struct xfs_mount *mp,
549 xfs_agnumber_t agno,
550 xfs_extlen_t *ask,
551 xfs_extlen_t *used)
552{
553 xfs_extlen_t tree_len = 0;
554 int error;
555
556 if (!xfs_sb_version_hasfinobt(&mp->m_sb))
557 return 0;
558
559 error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
560 if (error)
561 return error;
562
563 *ask += xfs_inobt_max_size(mp);
564 *used += tree_len;
565 return 0;
566}
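
Note that xfs_finobt_calc_reserves() accumulates into *ask and *used rather than assigning, so a caller can sum reservations across several btrees before committing a per-AG reservation. A hedged sketch of that accumulate-into-out-params convention (xfs types replaced with plain integers; the helper names are hypothetical):

#include <stdio.h>

/* Each *_calc_reserves() adds its own demand; it never overwrites the totals. */
static int finobt_calc_reserves(unsigned int *ask, unsigned int *used)
{
	*ask += 128;	/* worst-case tree size, like xfs_inobt_max_size() */
	*used += 40;	/* blocks currently held, like xfs_inobt_count_blocks() */
	return 0;
}

static int rmapbt_calc_reserves(unsigned int *ask, unsigned int *used)
{
	*ask += 256;
	*used += 75;
	return 0;
}

int main(void)
{
	unsigned int ask = 0, used = 0;

	if (finobt_calc_reserves(&ask, &used) || rmapbt_calc_reserves(&ask, &used))
		return 1;
	printf("reserve ask=%u used=%u\n", ask, used);	/* ask=384 used=115 */
	return 0;
}
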
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index bd88453217ce..aa81e2e63f3f 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
72#define xfs_inobt_rec_check_count(mp, rec) 0 72#define xfs_inobt_rec_check_count(mp, rec) 0
73#endif /* DEBUG */ 73#endif /* DEBUG */
74 74
75int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
76 xfs_extlen_t *ask, xfs_extlen_t *used);
77
75#endif /* __XFS_IALLOC_BTREE_H__ */ 78#endif /* __XFS_IALLOC_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 2580262e4ea0..584ec896a533 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
242 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || 242 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
243 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || 243 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
244 sbp->sb_blocksize != (1 << sbp->sb_blocklog) || 244 sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
245 sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG || 245 sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
246 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || 246 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
247 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || 247 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
248 sbp->sb_inodelog < XFS_DINODE_MIN_LOG || 248 sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
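
The tightened check works because a directory block spans 2^sb_dirblklog filesystem blocks, so its byte size is 2^(sb_blocklog + sb_dirblklog); the sum of the two logs, not sb_dirblklog alone, must stay within XFS_MAX_BLOCKSIZE_LOG. A small stand-alone illustration of the arithmetic (the constant value is assumed here):

#include <stdbool.h>
#include <stdio.h>

#define XFS_MAX_BLOCKSIZE_LOG 16	/* 64k max buffer size (assumed value) */

/* Reject geometries whose directory block size would exceed the maximum. */
static bool dirblk_geometry_ok(unsigned int blocklog, unsigned int dirblklog)
{
	return blocklog + dirblklog <= XFS_MAX_BLOCKSIZE_LOG;
}

int main(void)
{
	/* 4k blocks, 8-block dir blocks: 12 + 3 = 15 -> 32k dir blocks, OK. */
	printf("%d\n", dirblk_geometry_ok(12, 3));
	/* dirblklog 5 with 4k blocks: 12 + 5 = 17 -> 128k dir blocks, rejected
	 * even though dirblklog alone would have passed the old check. */
	printf("%d\n", dirblk_geometry_ok(12, 5));
	return 0;
}
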
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index b9abce524c33..c1417919ab0a 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -528,7 +528,6 @@ xfs_getbmap(
528 xfs_bmbt_irec_t *map; /* buffer for user's data */ 528 xfs_bmbt_irec_t *map; /* buffer for user's data */
529 xfs_mount_t *mp; /* file system mount point */ 529 xfs_mount_t *mp; /* file system mount point */
530 int nex; /* # of user extents can do */ 530 int nex; /* # of user extents can do */
531 int nexleft; /* # of user extents left */
532 int subnex; /* # of bmapi's can do */ 531 int subnex; /* # of bmapi's can do */
533 int nmap; /* number of map entries */ 532 int nmap; /* number of map entries */
534 struct getbmapx *out; /* output structure */ 533 struct getbmapx *out; /* output structure */
@@ -686,10 +685,8 @@ xfs_getbmap(
686 goto out_free_map; 685 goto out_free_map;
687 } 686 }
688 687
689 nexleft = nex;
690
691 do { 688 do {
692 nmap = (nexleft > subnex) ? subnex : nexleft; 689 nmap = (nex > subnex) ? subnex : nex;
693 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), 690 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
694 XFS_BB_TO_FSB(mp, bmv->bmv_length), 691 XFS_BB_TO_FSB(mp, bmv->bmv_length),
695 map, &nmap, bmapi_flags); 692 map, &nmap, bmapi_flags);
@@ -697,8 +694,8 @@ xfs_getbmap(
697 goto out_free_map; 694 goto out_free_map;
698 ASSERT(nmap <= subnex); 695 ASSERT(nmap <= subnex);
699 696
700 for (i = 0; i < nmap && nexleft && bmv->bmv_length && 697 for (i = 0; i < nmap && bmv->bmv_length &&
701 cur_ext < bmv->bmv_count; i++) { 698 cur_ext < bmv->bmv_count - 1; i++) {
702 out[cur_ext].bmv_oflags = 0; 699 out[cur_ext].bmv_oflags = 0;
703 if (map[i].br_state == XFS_EXT_UNWRITTEN) 700 if (map[i].br_state == XFS_EXT_UNWRITTEN)
704 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; 701 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
@@ -760,16 +757,27 @@ xfs_getbmap(
760 continue; 757 continue;
761 } 758 }
762 759
760 /*
761 * In order to report shared extents accurately,
762 * we report each distinct shared/unshared part
763 * of a single bmbt record using multiple bmap
764 * extents. To make that happen, we iterate the
765 * same map array item multiple times, each
766 * time trimming out the subextent that we just
767 * reported.
768 *
769 * Because of this, we must check the out array
770 * index (cur_ext) directly against bmv_count-1
771 * to avoid overflows.
772 */
763 if (inject_map.br_startblock != NULLFSBLOCK) { 773 if (inject_map.br_startblock != NULLFSBLOCK) {
764 map[i] = inject_map; 774 map[i] = inject_map;
765 i--; 775 i--;
766 } else 776 }
767 nexleft--;
768 bmv->bmv_entries++; 777 bmv->bmv_entries++;
769 cur_ext++; 778 cur_ext++;
770 } 779 }
771 } while (nmap && nexleft && bmv->bmv_length && 780 } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
772 cur_ext < bmv->bmv_count);
773 781
774 out_free_map: 782 out_free_map:
775 kmem_free(map); 783 kmem_free(map);
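
Because one bmbt record can now be reported as several bmap extents (one per shared/unshared subextent), the number of output entries is no longer bounded by the number of input records, and the loop must test cur_ext against bmv_count - 1 directly rather than count down nexleft. A reduced sketch of that bound (names hypothetical):

#include <stdio.h>

#define OUT_MAX 4	/* plays the role of bmv->bmv_count - 1 */

int main(void)
{
	int out_used = 0;	/* cur_ext */

	/* Each input record may expand to several output entries. */
	for (int rec = 0; rec < 3; rec++) {
		int parts = 3;	/* shared/unshared subextents of this record */

		/* Check the output index, not a per-record counter like the
		 * removed nexleft, or the output array can overflow. */
		for (int p = 0; p < parts && out_used < OUT_MAX; p++)
			out_used++;
	}
	printf("emitted %d of at most %d entries\n", out_used, OUT_MAX);
	return 0;
}
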
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 7f0a01f7b592..ac3b4db519df 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -422,6 +422,7 @@ retry:
422out_free_pages: 422out_free_pages:
423 for (i = 0; i < bp->b_page_count; i++) 423 for (i = 0; i < bp->b_page_count; i++)
424 __free_page(bp->b_pages[i]); 424 __free_page(bp->b_pages[i]);
425 bp->b_flags &= ~_XBF_PAGES;
425 return error; 426 return error;
426} 427}
427 428
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b9557795eb74..de32f0fe47c8 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1792,22 +1792,23 @@ xfs_inactive_ifree(
1792 int error; 1792 int error;
1793 1793
1794 /* 1794 /*
1795 * The ifree transaction might need to allocate blocks for record 1795 * We try to use a per-AG reservation for any block needed by the finobt
1796 * insertion to the finobt. We don't want to fail here at ENOSPC, so 1796 * tree, but as the finobt feature predates the per-AG reservation
1797 * allow ifree to dip into the reserved block pool if necessary. 1797 * support a degraded file system might not have enough space for the
1798 * 1798 * reservation at mount time. In that case try to dip into the reserved
1799 * Freeing large sets of inodes generally means freeing inode chunks, 1799 * pool and pray.
1800 * directory and file data blocks, so this should be relatively safe.
1801 * Only under severe circumstances should it be possible to free enough
1802 * inodes to exhaust the reserve block pool via finobt expansion while
1803 * at the same time not creating free space in the filesystem.
1804 * 1800 *
1805 * Send a warning if the reservation does happen to fail, as the inode 1801 * Send a warning if the reservation does happen to fail, as the inode
1806 * now remains allocated and sits on the unlinked list until the fs is 1802 * now remains allocated and sits on the unlinked list until the fs is
1807 * repaired. 1803 * repaired.
1808 */ 1804 */
1809 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 1805 if (unlikely(mp->m_inotbt_nores)) {
1810 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp); 1806 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1807 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1808 &tp);
1809 } else {
1810 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1811 }
1811 if (error) { 1812 if (error) {
1812 if (error == -ENOSPC) { 1813 if (error == -ENOSPC) {
1813 xfs_warn_ratelimited(mp, 1814 xfs_warn_ratelimited(mp,
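
The pattern in xfs_inactive_ifree() is: prefer the per-AG reservation (a plain transaction with no extra block reservation), and fall back to the global reserved pool only when the mount-time reservation could not be established. A hedged sketch of that two-path allocation (function and flag names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define TRANS_RESERVE 0x1	/* may dip into the reserved block pool */

static int trans_alloc(unsigned int blocks, unsigned int flags)
{
	printf("alloc trans: blocks=%u flags=%#x\n", blocks, flags);
	return 0;	/* pretend it succeeded */
}

static int inactive_ifree(bool finobt_resv_failed)
{
	/* Normal case: the per-AG reservation already covers finobt blocks. */
	if (!finobt_resv_failed)
		return trans_alloc(0, 0);
	/* Degraded case: reserve explicit blocks and allow the reserve pool. */
	return trans_alloc(16 /* like XFS_IFREE_SPACE_RES */, TRANS_RESERVE);
}

int main(void)
{
	inactive_ifree(false);
	inactive_ifree(true);
	return 0;
}
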
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 0d147428971e..1aa3abd67b36 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
681 xfs_trans_t *tp; 681 xfs_trans_t *tp;
682 int nimaps; 682 int nimaps;
683 int error = 0; 683 int error = 0;
684 int flags = 0; 684 int flags = XFS_BMAPI_DELALLOC;
685 int nres; 685 int nres;
686 686
687 if (whichfork == XFS_COW_FORK) 687 if (whichfork == XFS_COW_FORK)
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 84f785218907..7f351f706b7a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -140,6 +140,7 @@ typedef struct xfs_mount {
140 int m_fixedfsid[2]; /* unchanged for life of FS */ 140 int m_fixedfsid[2]; /* unchanged for life of FS */
141 uint m_dmevmask; /* DMI events for this FS */ 141 uint m_dmevmask; /* DMI events for this FS */
142 __uint64_t m_flags; /* global mount flags */ 142 __uint64_t m_flags; /* global mount flags */
143 bool m_inotbt_nores; /* no per-AG finobt resv. */
143 int m_ialloc_inos; /* inodes in inode allocation */ 144 int m_ialloc_inos; /* inodes in inode allocation */
144 int m_ialloc_blks; /* blocks in inode allocation */ 145 int m_ialloc_blks; /* blocks in inode allocation */
145 int m_ialloc_min_blks;/* min blocks in sparse inode 146 int m_ialloc_min_blks;/* min blocks in sparse inode
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 45e50ea90769..b669b123287b 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust(
1177 * the case in all other instances. It's OK that we do this because 1177 * the case in all other instances. It's OK that we do this because
1178 * quotacheck is done only at mount time. 1178 * quotacheck is done only at mount time.
1179 */ 1179 */
1180 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); 1180 error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
1181 &ip);
1181 if (error) { 1182 if (error) {
1182 *res = BULKSTAT_RV_NOTHING; 1183 *res = BULKSTAT_RV_NOTHING;
1183 return error; 1184 return error;
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 63554e9f6e0c..719db1968d81 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -9,18 +9,15 @@
9#ifndef KSYM_ALIGN 9#ifndef KSYM_ALIGN
10#define KSYM_ALIGN 8 10#define KSYM_ALIGN 8
11#endif 11#endif
12#ifndef KCRC_ALIGN
13#define KCRC_ALIGN 8
14#endif
15#else 12#else
16#define __put .long 13#define __put .long
17#ifndef KSYM_ALIGN 14#ifndef KSYM_ALIGN
18#define KSYM_ALIGN 4 15#define KSYM_ALIGN 4
19#endif 16#endif
17#endif
20#ifndef KCRC_ALIGN 18#ifndef KCRC_ALIGN
21#define KCRC_ALIGN 4 19#define KCRC_ALIGN 4
22#endif 20#endif
23#endif
24 21
25#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 22#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
26#define KSYM(name) _##name 23#define KSYM(name) _##name
@@ -52,7 +49,11 @@ KSYM(__kstrtab_\name):
52 .section ___kcrctab\sec+\name,"a" 49 .section ___kcrctab\sec+\name,"a"
53 .balign KCRC_ALIGN 50 .balign KCRC_ALIGN
54KSYM(__kcrctab_\name): 51KSYM(__kcrctab_\name):
55 __put KSYM(__crc_\name) 52#if defined(CONFIG_MODULE_REL_CRCS)
53 .long KSYM(__crc_\name) - .
54#else
55 .long KSYM(__crc_\name)
56#endif
56 .weak KSYM(__crc_\name) 57 .weak KSYM(__crc_\name)
57 .previous 58 .previous
58#endif 59#endif
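
With CONFIG_MODULE_REL_CRCS the kcrctab entry no longer stores the CRC symbol's absolute address but its offset from the table slot (".long __crc_name - ."), which fits in 32 bits even when symbols live above 4GB. A user-space sketch of resolving a value through such a self-relative 32-bit entry (the layout is contrived for illustration, and the cross-object pointer subtraction is the usual kernel idiom, not strictly portable C):

#include <stdint.h>
#include <stdio.h>

static uint32_t the_crc = 0xdeadbeef;
static int32_t slot;	/* kcrctab entry: holds "&the_crc - &slot" */

/* Resolve a self-relative entry: target = &entry + *entry. */
static const uint32_t *rel_deref(const int32_t *entry)
{
	return (const uint32_t *)((const char *)entry + *entry);
}

int main(void)
{
	/* Compute the offset at run time; the linker does this at link time. */
	slot = (int32_t)((const char *)&the_crc - (const char *)&slot);

	printf("crc=%#x\n", *rel_deref(&slot));	/* -> 0xdeadbeef */
	return 0;
}
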
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 192016e2b518..9c4ee144b5f6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -517,6 +517,7 @@ struct drm_device {
517 struct drm_minor *control; /**< Control node */ 517 struct drm_minor *control; /**< Control node */
518 struct drm_minor *primary; /**< Primary node */ 518 struct drm_minor *primary; /**< Primary node */
519 struct drm_minor *render; /**< Render node */ 519 struct drm_minor *render; /**< Render node */
520 bool registered;
520 521
521 /* currently active master for this device. Protected by master_mutex */ 522 /* currently active master for this device. Protected by master_mutex */
522 struct drm_master *master; 523 struct drm_master *master;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d6d241f63b9f..56814e8ae7ea 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
144 struct drm_crtc *ptr; 144 struct drm_crtc *ptr;
145 struct drm_crtc_state *state; 145 struct drm_crtc_state *state;
146 struct drm_crtc_commit *commit; 146 struct drm_crtc_commit *commit;
147 s64 __user *out_fence_ptr; 147 s32 __user *out_fence_ptr;
148}; 148};
149 149
150struct __drm_connnectors_state { 150struct __drm_connnectors_state {
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index a9b95246e26e..045a97cbeba2 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -381,6 +381,8 @@ struct drm_connector_funcs {
381 * core drm connector interfaces. Everything added from this callback 381 * core drm connector interfaces. Everything added from this callback
382 * should be unregistered in the early_unregister callback. 382 * should be unregistered in the early_unregister callback.
383 * 383 *
384 * This is called while holding drm_connector->mutex.
385 *
384 * Returns: 386 * Returns:
385 * 387 *
386 * 0 on success, or a negative error code on failure. 388 * 0 on success, or a negative error code on failure.
@@ -395,6 +397,8 @@ struct drm_connector_funcs {
395 * late_register(). It is called from drm_connector_unregister(), 397 * late_register(). It is called from drm_connector_unregister(),
396 * early in the driver unload sequence to disable userspace access 398 * early in the driver unload sequence to disable userspace access
397 * before data structures are torn down. 399 * before data structures are torn down.
400 *
401 * This is called while holding drm_connector->mutex.
398 */ 402 */
399 void (*early_unregister)(struct drm_connector *connector); 403 void (*early_unregister)(struct drm_connector *connector);
400 404
@@ -559,7 +563,6 @@ struct drm_cmdline_mode {
559 * @interlace_allowed: can this connector handle interlaced modes? 563 * @interlace_allowed: can this connector handle interlaced modes?
560 * @doublescan_allowed: can this connector handle doublescan? 564 * @doublescan_allowed: can this connector handle doublescan?
561 * @stereo_allowed: can this connector handle stereo modes? 565 * @stereo_allowed: can this connector handle stereo modes?
562 * @registered: is this connector exposed (registered) with userspace?
563 * @modes: modes available on this connector (from fill_modes() + user) 566 * @modes: modes available on this connector (from fill_modes() + user)
564 * @status: one of the drm_connector_status enums (connected, not, or unknown) 567 * @status: one of the drm_connector_status enums (connected, not, or unknown)
565 * @probed_modes: list of modes derived directly from the display 568 * @probed_modes: list of modes derived directly from the display
@@ -608,6 +611,13 @@ struct drm_connector {
608 char *name; 611 char *name;
609 612
610 /** 613 /**
614 * @mutex: Lock for general connector state, but currently only protects
615 * @registered. Most of the connector state is still protected by the
616 * mutex in &drm_mode_config.
617 */
618 struct mutex mutex;
619
620 /**
611 * @index: Compacted connector index, which matches the position inside 621 * @index: Compacted connector index, which matches the position inside
612 * the mode_config.list for drivers not supporting hot-add/removing. Can 622 * the mode_config.list for drivers not supporting hot-add/removing. Can
613 * be used as an array index. It is invariant over the lifetime of the 623 * be used as an array index. It is invariant over the lifetime of the
@@ -620,6 +630,10 @@ struct drm_connector {
620 bool interlace_allowed; 630 bool interlace_allowed;
621 bool doublescan_allowed; 631 bool doublescan_allowed;
622 bool stereo_allowed; 632 bool stereo_allowed;
633 /**
634 * @registered: Is this connector exposed (registered) with userspace?
635 * Protected by @mutex.
636 */
623 bool registered; 637 bool registered;
624 struct list_head modes; /* list of modes on this connector */ 638 struct list_head modes; /* list of modes on this connector */
625 639
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index bf9991b20611..137432386310 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -488,7 +488,7 @@ struct drm_mode_config {
488 /** 488 /**
489 * @prop_out_fence_ptr: Sync File fd pointer representing the 489 * @prop_out_fence_ptr: Sync File fd pointer representing the
490 * outgoing fences for a CRTC. Userspace should provide a pointer to a 490 * outgoing fences for a CRTC. Userspace should provide a pointer to a
491 * value of type s64, and then cast that pointer to u64. 491 * value of type s32, and then cast that pointer to u64.
492 */ 492 */
493 struct drm_property *prop_out_fence_ptr; 493 struct drm_property *prop_out_fence_ptr;
494 /** 494 /**
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 05cf951df3fe..3ed1f3b1d594 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -247,6 +247,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
247void bpf_map_put_with_uref(struct bpf_map *map); 247void bpf_map_put_with_uref(struct bpf_map *map);
248void bpf_map_put(struct bpf_map *map); 248void bpf_map_put(struct bpf_map *map);
249int bpf_map_precharge_memlock(u32 pages); 249int bpf_map_precharge_memlock(u32 pages);
250void *bpf_map_area_alloc(size_t size);
251void bpf_map_area_free(void *base);
250 252
251extern int sysctl_unprivileged_bpf_disabled; 253extern int sysctl_unprivileged_bpf_disabled;
252 254
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index a0875001b13c..df08a41d5be5 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -45,10 +45,9 @@ struct can_proto {
45extern int can_proto_register(const struct can_proto *cp); 45extern int can_proto_register(const struct can_proto *cp);
46extern void can_proto_unregister(const struct can_proto *cp); 46extern void can_proto_unregister(const struct can_proto *cp);
47 47
48extern int can_rx_register(struct net_device *dev, canid_t can_id, 48int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
49 canid_t mask, 49 void (*func)(struct sk_buff *, void *),
50 void (*func)(struct sk_buff *, void *), 50 void *data, char *ident, struct sock *sk);
51 void *data, char *ident);
52 51
53extern void can_rx_unregister(struct net_device *dev, canid_t can_id, 52extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
54 canid_t mask, 53 canid_t mask,
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index d936a0021839..921acaaa1601 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -8,9 +8,7 @@ enum cpuhp_state {
8 CPUHP_CREATE_THREADS, 8 CPUHP_CREATE_THREADS,
9 CPUHP_PERF_PREPARE, 9 CPUHP_PERF_PREPARE,
10 CPUHP_PERF_X86_PREPARE, 10 CPUHP_PERF_X86_PREPARE,
11 CPUHP_PERF_X86_UNCORE_PREP,
12 CPUHP_PERF_X86_AMD_UNCORE_PREP, 11 CPUHP_PERF_X86_AMD_UNCORE_PREP,
13 CPUHP_PERF_X86_RAPL_PREP,
14 CPUHP_PERF_BFIN, 12 CPUHP_PERF_BFIN,
15 CPUHP_PERF_POWER, 13 CPUHP_PERF_POWER,
16 CPUHP_PERF_SUPERH, 14 CPUHP_PERF_SUPERH,
@@ -86,7 +84,6 @@ enum cpuhp_state {
86 CPUHP_AP_IRQ_ARMADA_XP_STARTING, 84 CPUHP_AP_IRQ_ARMADA_XP_STARTING,
87 CPUHP_AP_IRQ_BCM2836_STARTING, 85 CPUHP_AP_IRQ_BCM2836_STARTING,
88 CPUHP_AP_ARM_MVEBU_COHERENCY, 86 CPUHP_AP_ARM_MVEBU_COHERENCY,
89 CPUHP_AP_PERF_X86_UNCORE_STARTING,
90 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, 87 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
91 CPUHP_AP_PERF_X86_STARTING, 88 CPUHP_AP_PERF_X86_STARTING,
92 CPUHP_AP_PERF_X86_AMD_IBS_STARTING, 89 CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
diff --git a/include/linux/export.h b/include/linux/export.h
index 2a0f61fbc731..1a1dfdb2a5c6 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -43,12 +43,19 @@ extern struct module __this_module;
43#ifdef CONFIG_MODVERSIONS 43#ifdef CONFIG_MODVERSIONS
44/* Mark the CRC weak since genksyms apparently decides not to 44/* Mark the CRC weak since genksyms apparently decides not to
45 * generate checksums for some symbols */ 45 * generate checksums for some symbols */
46#if defined(CONFIG_MODULE_REL_CRCS)
46#define __CRC_SYMBOL(sym, sec) \ 47#define __CRC_SYMBOL(sym, sec) \
47 extern __visible void *__crc_##sym __attribute__((weak)); \ 48 asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
48 static const unsigned long __kcrctab_##sym \ 49 " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
49 __used \ 50 " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
50 __attribute__((section("___kcrctab" sec "+" #sym), used)) \ 51 " .previous \n");
51 = (unsigned long) &__crc_##sym; 52#else
53#define __CRC_SYMBOL(sym, sec) \
54 asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
55 " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
56 " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
57 " .previous \n");
58#endif
52#else 59#else
53#define __CRC_SYMBOL(sym, sec) 60#define __CRC_SYMBOL(sym, sec)
54#endif 61#endif
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 13ba552e6c09..4c467ef50159 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -360,6 +360,7 @@ struct fscache_object {
360#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ 360#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
361#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ 361#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
362#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ 362#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
363#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
363 364
364 struct list_head cache_link; /* link in cache->object_list */ 365 struct list_head cache_link; /* link in cache->object_list */
365 struct hlist_node cookie_link; /* link in cookie->backing_objects */ 366 struct hlist_node cookie_link; /* link in cookie->backing_objects */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c2748accea71..e973faba69dc 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
274 struct irq_chip *irqchip, 274 struct irq_chip *irqchip,
275 int parent_irq); 275 int parent_irq);
276 276
277int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, 277int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
278 struct irq_chip *irqchip,
279 unsigned int first_irq,
280 irq_flow_handler_t handler,
281 unsigned int type,
282 bool nested,
283 struct lock_class_key *lock_key);
284
285#ifdef CONFIG_LOCKDEP
286
287/*
288 * Lockdep requires that each irqchip instance be created with a
289 * unique key so as to avoid unnecessary warnings. This upfront
290 * boilerplate static inlines provides such a key for each
291 * unique instance.
292 */
293static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
294 struct irq_chip *irqchip,
295 unsigned int first_irq,
296 irq_flow_handler_t handler,
297 unsigned int type)
298{
299 static struct lock_class_key key;
300
301 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
302 handler, type, false, &key);
303}
304
305static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
278 struct irq_chip *irqchip, 306 struct irq_chip *irqchip,
279 unsigned int first_irq, 307 unsigned int first_irq,
280 irq_flow_handler_t handler, 308 irq_flow_handler_t handler,
281 unsigned int type, 309 unsigned int type)
282 bool nested, 310{
283 struct lock_class_key *lock_key); 311
312 static struct lock_class_key key;
313
314 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
315 handler, type, true, &key);
316}
317#else
318static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
319 struct irq_chip *irqchip,
320 unsigned int first_irq,
321 irq_flow_handler_t handler,
322 unsigned int type)
323{
324 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
325 handler, type, false, NULL);
326}
284 327
285/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
286static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, 328static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
287 struct irq_chip *irqchip, 329 struct irq_chip *irqchip,
288 unsigned int first_irq, 330 unsigned int first_irq,
289 irq_flow_handler_t handler, 331 irq_flow_handler_t handler,
290 unsigned int type) 332 unsigned int type)
291{ 333{
292 return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq, 334 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
293 handler, type, true, NULL); 335 handler, type, true, NULL);
294} 336}
295 337#endif /* CONFIG_LOCKDEP */
296#ifdef CONFIG_LOCKDEP
297#define gpiochip_irqchip_add(...) \
298( \
299 ({ \
300 static struct lock_class_key _key; \
301 _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
302 }) \
303)
304#else
305#define gpiochip_irqchip_add(...) \
306 _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
307#endif
308 338
309#endif /* CONFIG_GPIOLIB_IRQCHIP */ 339#endif /* CONFIG_GPIOLIB_IRQCHIP */
310 340
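
Turning the old macro into static inlines keeps the lockdep trick: the static struct lock_class_key inside the inline body gives each translation unit that registers an irqchip its own key, so unrelated drivers do not share (and falsely conflate) a lock class. A minimal sketch of the pattern outside the kernel (the key type and registration function are stand-ins):

#include <stdio.h>

struct lock_class_key { int dummy; };

static int irqchip_add_key(const char *who, struct lock_class_key *key)
{
	printf("%s registered with key %p\n", who, (void *)key);
	return 0;
}

/*
 * Each file that includes this header and calls irqchip_add() gets its
 * own copy of the static key, so lock classes stay per-user, much like
 * the gpiochip_irqchip_add() inline above.
 */
static inline int irqchip_add(const char *who)
{
	static struct lock_class_key key;

	return irqchip_add_key(who, &key);
}

int main(void)
{
	irqchip_add("driver-a");	/* same key within this file... */
	irqchip_add("driver-a again");	/* ...distinct per compilation unit */
	return 0;
}
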
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 42fe43fb0c80..183efde54269 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
128 u32 ring_data_startoffset; 128 u32 ring_data_startoffset;
129 u32 priv_write_index; 129 u32 priv_write_index;
130 u32 priv_read_index; 130 u32 priv_read_index;
131 u32 cached_read_index;
131}; 132};
132 133
133/* 134/*
@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
180 return write; 181 return write;
181} 182}
182 183
184static inline u32 hv_get_cached_bytes_to_write(
185 const struct hv_ring_buffer_info *rbi)
186{
187 u32 read_loc, write_loc, dsize, write;
188
189 dsize = rbi->ring_datasize;
190 read_loc = rbi->cached_read_index;
191 write_loc = rbi->ring_buffer->write_index;
192
193 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
194 read_loc - write_loc;
195 return write;
196}
183/* 197/*
184 * VMBUS version is a 32 bit entity broken up into 198 * VMBUS version is a 32 bit entity broken up into
185 * two 16 bit quantities: major_number. minor_number. 199 * two 16 bit quantities: major_number. minor_number.
@@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
1488 1502
1489static inline void hv_signal_on_read(struct vmbus_channel *channel) 1503static inline void hv_signal_on_read(struct vmbus_channel *channel)
1490{ 1504{
1491 u32 cur_write_sz; 1505 u32 cur_write_sz, cached_write_sz;
1492 u32 pending_sz; 1506 u32 pending_sz;
1493 struct hv_ring_buffer_info *rbi = &channel->inbound; 1507 struct hv_ring_buffer_info *rbi = &channel->inbound;
1494 1508
@@ -1512,12 +1526,24 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
1512 1526
1513 cur_write_sz = hv_get_bytes_to_write(rbi); 1527 cur_write_sz = hv_get_bytes_to_write(rbi);
1514 1528
1515 if (cur_write_sz >= pending_sz) 1529 if (cur_write_sz < pending_sz)
1530 return;
1531
1532 cached_write_sz = hv_get_cached_bytes_to_write(rbi);
1533 if (cached_write_sz < pending_sz)
1516 vmbus_setevent(channel); 1534 vmbus_setevent(channel);
1517 1535
1518 return; 1536 return;
1519} 1537}
1520 1538
1539static inline void
1540init_cached_read_index(struct vmbus_channel *channel)
1541{
1542 struct hv_ring_buffer_info *rbi = &channel->inbound;
1543
1544 rbi->cached_read_index = rbi->ring_buffer->read_index;
1545}
1546
1521/* 1547/*
1522 * An API to support in-place processing of incoming VMBUS packets. 1548 * An API to support in-place processing of incoming VMBUS packets.
1523 */ 1549 */
@@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
1569 * This call commits the read index and potentially signals the host. 1595 * This call commits the read index and potentially signals the host.
1570 * Here is the pattern for using the "in-place" consumption APIs: 1596 * Here is the pattern for using the "in-place" consumption APIs:
1571 * 1597 *
1598 * init_cached_read_index();
1599 *
1572 * while (get_next_pkt_raw() { 1600 * while (get_next_pkt_raw() {
1573 * process the packet "in-place"; 1601 * process the packet "in-place";
1574 * put_pkt_raw(); 1602 * put_pkt_raw();
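
hv_get_cached_bytes_to_write() applies the standard circular-buffer free-space formula, only with the reader position taken from the cached read index: free = size - (write - read) when the writer is ahead, else read - write after wrap. A quick stand-alone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Free bytes in a ring of @dsize bytes given reader/writer offsets. */
static uint32_t bytes_to_write(uint32_t read_loc, uint32_t write_loc,
			       uint32_t dsize)
{
	return write_loc >= read_loc ? dsize - (write_loc - read_loc)
				     : read_loc - write_loc;
}

int main(void)
{
	/* Writer ahead of reader: 4096 - (300 - 100) = 3896 free. */
	printf("%u\n", bytes_to_write(100, 300, 4096));
	/* Writer wrapped around: 100 - 20 = 80 free. */
	printf("%u\n", bytes_to_write(100, 20, 4096));
	return 0;
}
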
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e79875574b39..39e3254e5769 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -184,6 +184,7 @@ struct irq_data {
184 * 184 *
185 * IRQD_TRIGGER_MASK - Mask for the trigger type bits 185 * IRQD_TRIGGER_MASK - Mask for the trigger type bits
186 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending 186 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
187 * IRQD_ACTIVATED - Interrupt has already been activated
187 * IRQD_NO_BALANCING - Balancing disabled for this IRQ 188 * IRQD_NO_BALANCING - Balancing disabled for this IRQ
188 * IRQD_PER_CPU - Interrupt is per cpu 189 * IRQD_PER_CPU - Interrupt is per cpu
189 * IRQD_AFFINITY_SET - Interrupt affinity was set 190 * IRQD_AFFINITY_SET - Interrupt affinity was set
@@ -202,6 +203,7 @@ struct irq_data {
202enum { 203enum {
203 IRQD_TRIGGER_MASK = 0xf, 204 IRQD_TRIGGER_MASK = 0xf,
204 IRQD_SETAFFINITY_PENDING = (1 << 8), 205 IRQD_SETAFFINITY_PENDING = (1 << 8),
206 IRQD_ACTIVATED = (1 << 9),
205 IRQD_NO_BALANCING = (1 << 10), 207 IRQD_NO_BALANCING = (1 << 10),
206 IRQD_PER_CPU = (1 << 11), 208 IRQD_PER_CPU = (1 << 11),
207 IRQD_AFFINITY_SET = (1 << 12), 209 IRQD_AFFINITY_SET = (1 << 12),
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
312 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; 314 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
313} 315}
314 316
317static inline bool irqd_is_activated(struct irq_data *d)
318{
319 return __irqd_to_state(d) & IRQD_ACTIVATED;
320}
321
322static inline void irqd_set_activated(struct irq_data *d)
323{
324 __irqd_to_state(d) |= IRQD_ACTIVATED;
325}
326
327static inline void irqd_clr_activated(struct irq_data *d)
328{
329 __irqd_to_state(d) &= ~IRQD_ACTIVATED;
330}
331
315#undef __irqd_to_state 332#undef __irqd_to_state
316 333
317static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 334static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d91e6a..ef3d4f67118c 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
203 * ... and so on. 203 * ... and so on.
204 */ 204 */
205 205
206#define order_base_2(n) ilog2(roundup_pow_of_two(n)) 206static inline __attribute_const__
207int __order_base_2(unsigned long n)
208{
209 return n > 1 ? ilog2(n - 1) + 1 : 0;
210}
207 211
212#define order_base_2(n) \
213( \
214 __builtin_constant_p(n) ? ( \
215 ((n) == 0 || (n) == 1) ? 0 : \
216 ilog2((n) - 1) + 1) : \
217 __order_base_2(n) \
218)
208#endif /* _LINUX_LOG2_H */ 219#endif /* _LINUX_LOG2_H */
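
The rewrite keeps order_base_2()'s documented results (the smallest e with 2^e >= n, with 0 and 1 both mapping to 0) while making the constant case usable in static initializers; the runtime case goes through __order_base_2(), which computes ilog2(n - 1) + 1 instead of rounding n up to a power of two first. The same computation in portable user-space C (assuming GCC/Clang's __builtin_clzl as the ilog2 stand-in):

#include <stdio.h>

/* floor(log2(n)) for n > 0, like the kernel's runtime ilog2(). */
static int ilog2ul(unsigned long n)
{
	return (int)(sizeof(n) * 8 - 1) - __builtin_clzl(n);
}

/* order_base_2: smallest e with 2^e >= n; 0 and 1 map to 0. */
static int order_base_2(unsigned long n)
{
	return n > 1 ? ilog2ul(n - 1) + 1 : 0;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       order_base_2(1),		/* 0 */
	       order_base_2(4),		/* 2: already a power of two */
	       order_base_2(5),		/* 3 */
	       order_base_2(0));	/* 0 by convention */
	return 0;
}
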
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fadea47..134a2f69c21a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
85extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); 85extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
86/* VM interface that may be used by firmware interface */ 86/* VM interface that may be used by firmware interface */
87extern int online_pages(unsigned long, unsigned long, int); 87extern int online_pages(unsigned long, unsigned long, int);
88extern int test_pages_in_a_zone(unsigned long, unsigned long); 88extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
89 unsigned long *valid_start, unsigned long *valid_end);
89extern void __offline_isolated_pages(unsigned long, unsigned long); 90extern void __offline_isolated_pages(unsigned long, unsigned long);
90 91
91typedef void (*online_page_callback_t)(struct page *page); 92typedef void (*online_page_callback_t)(struct page *page);
@@ -284,7 +285,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
284 unsigned long map_offset); 285 unsigned long map_offset);
285extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, 286extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
286 unsigned long pnum); 287 unsigned long pnum);
287extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages, 288extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
288 enum zone_type target); 289 enum zone_type target, int *zone_shift);
289 290
290#endif /* __LINUX_MEMORY_HOTPLUG_H */ 291#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e0095e..f541da68d1e7 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
35#define PHY_ID_KSZ886X 0x00221430 35#define PHY_ID_KSZ886X 0x00221430
36#define PHY_ID_KSZ8863 0x00221435 36#define PHY_ID_KSZ8863 0x00221435
37 37
38#define PHY_ID_KSZ8795 0x00221550
39
38/* struct phy_device dev_flags definitions */ 40/* struct phy_device dev_flags definitions */
39#define MICREL_PHY_50MHZ_CLK 0x00000001 41#define MICREL_PHY_50MHZ_CLK 0x00000001
40#define MICREL_PHY_FXEN 0x00000002 42#define MICREL_PHY_FXEN 0x00000002
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36d9896fbc1e..f4aac87adcc3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
972 * @zonelist - The zonelist to search for a suitable zone 972 * @zonelist - The zonelist to search for a suitable zone
973 * @highest_zoneidx - The zone index of the highest zone to return 973 * @highest_zoneidx - The zone index of the highest zone to return
974 * @nodes - An optional nodemask to filter the zonelist with 974 * @nodes - An optional nodemask to filter the zonelist with
975 * @zone - The first suitable zone found is returned via this parameter 975 * @return - Zoneref pointer for the first suitable zone found (see below)
976 * 976 *
977 * This function returns the first zone at or below a given zone index that is 977 * This function returns the first zone at or below a given zone index that is
978 * within the allowed nodemask. The zoneref returned is a cursor that can be 978 * within the allowed nodemask. The zoneref returned is a cursor that can be
979 * used to iterate the zonelist with next_zones_zonelist by advancing it by 979 * used to iterate the zonelist with next_zones_zonelist by advancing it by
980 * one before calling. 980 * one before calling.
981 *
982 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
983 * never NULL). This may happen either genuinely, or due to concurrent nodemask
984 * update due to cpuset modification.
981 */ 985 */
982static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, 986static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
983 enum zone_type highest_zoneidx, 987 enum zone_type highest_zoneidx,
diff --git a/include/linux/module.h b/include/linux/module.h
index 7c84273d60b9..cc7cba219b20 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -346,7 +346,7 @@ struct module {
346 346
347 /* Exported symbols */ 347 /* Exported symbols */
348 const struct kernel_symbol *syms; 348 const struct kernel_symbol *syms;
349 const unsigned long *crcs; 349 const s32 *crcs;
350 unsigned int num_syms; 350 unsigned int num_syms;
351 351
352 /* Kernel parameters. */ 352 /* Kernel parameters. */
@@ -359,18 +359,18 @@ struct module {
359 /* GPL-only exported symbols. */ 359 /* GPL-only exported symbols. */
360 unsigned int num_gpl_syms; 360 unsigned int num_gpl_syms;
361 const struct kernel_symbol *gpl_syms; 361 const struct kernel_symbol *gpl_syms;
362 const unsigned long *gpl_crcs; 362 const s32 *gpl_crcs;
363 363
364#ifdef CONFIG_UNUSED_SYMBOLS 364#ifdef CONFIG_UNUSED_SYMBOLS
365 /* unused exported symbols. */ 365 /* unused exported symbols. */
366 const struct kernel_symbol *unused_syms; 366 const struct kernel_symbol *unused_syms;
367 const unsigned long *unused_crcs; 367 const s32 *unused_crcs;
368 unsigned int num_unused_syms; 368 unsigned int num_unused_syms;
369 369
370 /* GPL-only, unused exported symbols. */ 370 /* GPL-only, unused exported symbols. */
371 unsigned int num_unused_gpl_syms; 371 unsigned int num_unused_gpl_syms;
372 const struct kernel_symbol *unused_gpl_syms; 372 const struct kernel_symbol *unused_gpl_syms;
373 const unsigned long *unused_gpl_crcs; 373 const s32 *unused_gpl_crcs;
374#endif 374#endif
375 375
376#ifdef CONFIG_MODULE_SIG 376#ifdef CONFIG_MODULE_SIG
@@ -382,7 +382,7 @@ struct module {
382 382
383 /* symbols that will be GPL-only in the near future. */ 383 /* symbols that will be GPL-only in the near future. */
384 const struct kernel_symbol *gpl_future_syms; 384 const struct kernel_symbol *gpl_future_syms;
385 const unsigned long *gpl_future_crcs; 385 const s32 *gpl_future_crcs;
386 unsigned int num_gpl_future_syms; 386 unsigned int num_gpl_future_syms;
387 387
388 /* Exception table */ 388 /* Exception table */
@@ -523,7 +523,7 @@ struct module *find_module(const char *name);
523 523
524struct symsearch { 524struct symsearch {
525 const struct kernel_symbol *start, *stop; 525 const struct kernel_symbol *start, *stop;
526 const unsigned long *crcs; 526 const s32 *crcs;
527 enum { 527 enum {
528 NOT_GPL_ONLY, 528 NOT_GPL_ONLY,
529 GPL_ONLY, 529 GPL_ONLY,
@@ -539,7 +539,7 @@ struct symsearch {
539 */ 539 */
540const struct kernel_symbol *find_symbol(const char *name, 540const struct kernel_symbol *find_symbol(const char *name,
541 struct module **owner, 541 struct module **owner,
542 const unsigned long **crc, 542 const s32 **crc,
543 bool gplok, 543 bool gplok,
544 bool warn); 544 bool warn);
545 545
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9bde9558b596..70ad0291d517 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -866,11 +866,15 @@ struct netdev_xdp {
866 * of useless work if you return NETDEV_TX_BUSY. 866 * of useless work if you return NETDEV_TX_BUSY.
867 * Required; cannot be NULL. 867 * Required; cannot be NULL.
868 * 868 *
869 * netdev_features_t (*ndo_fix_features)(struct net_device *dev, 869 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
870 * netdev_features_t features); 870 * struct net_device *dev,
871 * Adjusts the requested feature flags according to device-specific 871 * netdev_features_t features);
872 * constraints, and returns the resulting flags. Must not modify 872 * Called by core transmit path to determine if device is capable of
873 * the device state. 873 * performing offload operations on a given packet. This is to give
874 * the device an opportunity to implement any restrictions that cannot
875 * be otherwise expressed by feature flags. The check is called with
876 * the set of features that the stack has calculated and it returns
877 * those the driver believes to be appropriate.
874 * 878 *
875 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, 879 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
876 * void *accel_priv, select_queue_fallback_t fallback); 880 * void *accel_priv, select_queue_fallback_t fallback);
@@ -1028,6 +1032,12 @@ struct netdev_xdp {
1028 * Called to release previously enslaved netdev. 1032 * Called to release previously enslaved netdev.
1029 * 1033 *
1030 * Feature/offload setting functions. 1034 * Feature/offload setting functions.
1035 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1036 * netdev_features_t features);
1037 * Adjusts the requested feature flags according to device-specific
1038 * constraints, and returns the resulting flags. Must not modify
1039 * the device state.
1040 *
1031 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); 1041 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1032 * Called to update device configuration to new features. Passed 1042 * Called to update device configuration to new features. Passed
1033 * feature set might be less than what was returned by ndo_fix_features()). 1043 * feature set might be less than what was returned by ndo_fix_features()).
@@ -1100,15 +1110,6 @@ struct netdev_xdp {
1100 * Callback to use for xmit over the accelerated station. This 1110 * Callback to use for xmit over the accelerated station. This
1101 * is used in place of ndo_start_xmit on accelerated net 1111 * is used in place of ndo_start_xmit on accelerated net
1102 * devices. 1112 * devices.
1103 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1104 * struct net_device *dev
1105 * netdev_features_t features);
1106 * Called by core transmit path to determine if device is capable of
1107 * performing offload operations on a given packet. This is to give
1108 * the device an opportunity to implement any restrictions that cannot
1109 * be otherwise expressed by feature flags. The check is called with
1110 * the set of features that the stack has calculated and it returns
1111 * those the driver believes to be appropriate.
1112 * int (*ndo_set_tx_maxrate)(struct net_device *dev, 1113 * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1113 * int queue_index, u32 maxrate); 1114 * int queue_index, u32 maxrate);
1114 * Called when a user wants to set a max-rate limitation of specific 1115 * Called when a user wants to set a max-rate limitation of specific
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bca536341d1a..1b1ca04820a3 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -282,7 +282,7 @@ enum nfsstat4 {
282 282
283static inline bool seqid_mutating_err(u32 err) 283static inline bool seqid_mutating_err(u32 err)
284{ 284{
285 /* rfc 3530 section 8.1.5: */ 285 /* See RFC 7530, section 9.1.7 */
286 switch (err) { 286 switch (err) {
287 case NFS4ERR_STALE_CLIENTID: 287 case NFS4ERR_STALE_CLIENTID:
288 case NFS4ERR_STALE_STATEID: 288 case NFS4ERR_STALE_STATEID:
@@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err)
291 case NFS4ERR_BADXDR: 291 case NFS4ERR_BADXDR:
292 case NFS4ERR_RESOURCE: 292 case NFS4ERR_RESOURCE:
293 case NFS4ERR_NOFILEHANDLE: 293 case NFS4ERR_NOFILEHANDLE:
294 case NFS4ERR_MOVED:
294 return false; 295 return false;
295 }; 296 };
296 return true; 297 return true;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aacca824a6ae..0a3fadc32693 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
110extern int watchdog_thresh; 110extern int watchdog_thresh;
111extern unsigned long watchdog_enabled; 111extern unsigned long watchdog_enabled;
112extern unsigned long *watchdog_cpumask_bits; 112extern unsigned long *watchdog_cpumask_bits;
113extern atomic_t watchdog_park_in_progress;
113#ifdef CONFIG_SMP 114#ifdef CONFIG_SMP
114extern int sysctl_softlockup_all_cpu_backtrace; 115extern int sysctl_softlockup_all_cpu_backtrace;
115extern int sysctl_hardlockup_all_cpu_backtrace; 116extern int sysctl_hardlockup_all_cpu_backtrace;
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 1c7eec09e5eb..3a481a49546e 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
204static inline bool percpu_ref_tryget(struct percpu_ref *ref) 204static inline bool percpu_ref_tryget(struct percpu_ref *ref)
205{ 205{
206 unsigned long __percpu *percpu_count; 206 unsigned long __percpu *percpu_count;
207 int ret; 207 bool ret;
208 208
209 rcu_read_lock_sched(); 209 rcu_read_lock_sched();
210 210
@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
238static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) 238static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
239{ 239{
240 unsigned long __percpu *percpu_count; 240 unsigned long __percpu *percpu_count;
241 int ret = false; 241 bool ret = false;
242 242
243 rcu_read_lock_sched(); 243 rcu_read_lock_sched();
244 244
diff --git a/include/linux/phy.h b/include/linux/phy.h
index f7d95f644eed..7fc1105605bf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -25,7 +25,6 @@
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/mod_devicetable.h> 27#include <linux/mod_devicetable.h>
28#include <linux/phy_led_triggers.h>
29 28
30#include <linux/atomic.h> 29#include <linux/atomic.h>
31 30
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index a2daea0a37d2..b37b05bfd1a6 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -18,11 +18,11 @@ struct phy_device;
18#ifdef CONFIG_LED_TRIGGER_PHY 18#ifdef CONFIG_LED_TRIGGER_PHY
19 19
20#include <linux/leds.h> 20#include <linux/leds.h>
21#include <linux/phy.h>
21 22
22#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 23#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
23#define PHY_MII_BUS_ID_SIZE (20 - 3)
24 24
25#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \ 25#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
26 FIELD_SIZEOF(struct mdio_device, addr)+\ 26 FIELD_SIZEOF(struct mdio_device, addr)+\
27 PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) 27 PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
28 28
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 85cc819676e8..333ad11b3dd9 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
216void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); 216void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
217bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, 217bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
218 const struct sockaddr *sap); 218 const struct sockaddr *sap);
219void rpc_cleanup_clids(void);
219#endif /* __KERNEL__ */ 220#endif /* __KERNEL__ */
220#endif /* _LINUX_SUNRPC_CLNT_H */ 221#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0c729c3c8549..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
194}; 194};
195 195
196#ifdef CONFIG_SUSPEND 196#ifdef CONFIG_SUSPEND
197extern suspend_state_t mem_sleep_default;
198
199/** 197/**
200 * suspend_set_ops - set platform dependent suspend operations 198 * suspend_set_ops - set platform dependent suspend operations
201 * @ops: The new suspend operations to set. 199 * @ops: The new suspend operations to set.
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 66204007d7ac..5209b5ed2a64 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
56 56
57static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, 57static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
58 struct virtio_net_hdr *hdr, 58 struct virtio_net_hdr *hdr,
59 bool little_endian) 59 bool little_endian,
60 bool has_data_valid)
60{ 61{
61 memset(hdr, 0, sizeof(*hdr)); /* no info leak */ 62 memset(hdr, 0, sizeof(*hdr)); /* no info leak */
62 63
@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
91 skb_checksum_start_offset(skb)); 92 skb_checksum_start_offset(skb));
92 hdr->csum_offset = __cpu_to_virtio16(little_endian, 93 hdr->csum_offset = __cpu_to_virtio16(little_endian,
93 skb->csum_offset); 94 skb->csum_offset);
94 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 95 } else if (has_data_valid &&
96 skb->ip_summed == CHECKSUM_UNNECESSARY) {
95 hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; 97 hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
96 } /* else everything is zero */ 98 } /* else everything is zero */
97 99
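
The new has_data_valid parameter exists because VIRTIO_NET_HDR_F_DATA_VALID is only meaningful for packets delivered to a receiver; a transmitting endpoint must not set it, so xmit-path callers pass false and only receive-side callers pass true. Sketched as a tiny policy check (the constant name is a stand-in for the virtio definition):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_F_DATA_VALID 0x02	/* "checksum already verified" hint */

/* Build the flag byte; the hint is offered only when the caller allows it. */
static uint8_t hdr_flags(bool csum_unnecessary, bool has_data_valid)
{
	if (has_data_valid && csum_unnecessary)
		return HDR_F_DATA_VALID;
	return 0;
}

int main(void)
{
	/* The receive path may advertise the hint... */
	printf("rx: %#x\n", hdr_flags(true, true));	/* 0x2 */
	/* ...the transmit path must not, even for verified packets. */
	printf("tx: %#x\n", hdr_flags(true, false));	/* 0 */
	return 0;
}
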
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 487e57391664..dbf0abba33b8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
776{ 776{
777 u32 hash; 777 u32 hash;
778 778
779 /* @flowlabel may include more than a flow label, e.g., the traffic class.
780 * Here we want only the flow label value.
781 */
782 flowlabel &= IPV6_FLOWLABEL_MASK;
783
779 if (flowlabel || 784 if (flowlabel ||
780 net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || 785 net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
781 (!autolabel && 786 (!autolabel &&
@@ -871,7 +876,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
871 * upper-layer output functions 876 * upper-layer output functions
872 */ 877 */
873int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, 878int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
874 struct ipv6_txoptions *opt, int tclass); 879 __u32 mark, struct ipv6_txoptions *opt, int tclass);
875 880
876int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); 881int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
877 882
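
ip6_make_flowlabel() may be handed the full flowinfo word, whose upper bits carry the traffic class; masking with IPV6_FLOWLABEL_MASK keeps only the low 20 flow-label bits so a nonzero traffic class is not mistaken for a caller-supplied label. Illustrated on host-order values (the real mask is a big-endian constant):

#include <stdint.h>
#include <stdio.h>

#define FLOWLABEL_MASK 0x000FFFFFu	/* low 20 bits, host order here */

int main(void)
{
	/* flowinfo carrying traffic class 0xb8 in bits 20-27, label 0 */
	uint32_t flowinfo = 0xb8u << 20;

	/* Without the mask this looks like a nonzero label and would
	 * suppress auto-labeling; with it, the label is correctly 0. */
	printf("raw=%#x label=%#x\n", flowinfo, flowinfo & FLOWLABEL_MASK);
	return 0;
}
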
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index d4c1c75b8862..73dd87647460 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -44,6 +44,8 @@ struct lwtunnel_encap_ops {
44 int (*get_encap_size)(struct lwtunnel_state *lwtstate); 44 int (*get_encap_size)(struct lwtunnel_state *lwtstate);
45 int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b); 45 int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
46 int (*xmit)(struct sk_buff *skb); 46 int (*xmit)(struct sk_buff *skb);
47
48 struct module *owner;
47}; 49};
48 50
49#ifdef CONFIG_LWTUNNEL 51#ifdef CONFIG_LWTUNNEL
@@ -105,6 +107,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
105 unsigned int num); 107 unsigned int num);
106int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, 108int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
107 unsigned int num); 109 unsigned int num);
110int lwtunnel_valid_encap_type(u16 encap_type);
111int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
108int lwtunnel_build_state(struct net_device *dev, u16 encap_type, 112int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
109 struct nlattr *encap, 113 struct nlattr *encap,
110 unsigned int family, const void *cfg, 114 unsigned int family, const void *cfg,
@@ -168,6 +172,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
168 return -EOPNOTSUPP; 172 return -EOPNOTSUPP;
169} 173}
170 174
175static inline int lwtunnel_valid_encap_type(u16 encap_type)
176{
177 return -EOPNOTSUPP;
178}
179static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
180{
181 return -EOPNOTSUPP;
182}
183
171static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type, 184static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
172 struct nlattr *encap, 185 struct nlattr *encap,
173 unsigned int family, const void *cfg, 186 unsigned int family, const void *cfg,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 924325c46aab..7dfdb517f0be 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -207,9 +207,9 @@ struct nft_set_iter {
207 unsigned int skip; 207 unsigned int skip;
208 int err; 208 int err;
209 int (*fn)(const struct nft_ctx *ctx, 209 int (*fn)(const struct nft_ctx *ctx,
210 const struct nft_set *set, 210 struct nft_set *set,
211 const struct nft_set_iter *iter, 211 const struct nft_set_iter *iter,
212 const struct nft_set_elem *elem); 212 struct nft_set_elem *elem);
213}; 213};
214 214
215/** 215/**
@@ -301,7 +301,7 @@ struct nft_set_ops {
301 void (*remove)(const struct nft_set *set, 301 void (*remove)(const struct nft_set *set,
302 const struct nft_set_elem *elem); 302 const struct nft_set_elem *elem);
303 void (*walk)(const struct nft_ctx *ctx, 303 void (*walk)(const struct nft_ctx *ctx,
304 const struct nft_set *set, 304 struct nft_set *set,
305 struct nft_set_iter *iter); 305 struct nft_set_iter *iter);
306 306
307 unsigned int (*privsize)(const struct nlattr * const nla[]); 307 unsigned int (*privsize)(const struct nlattr * const nla[]);
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index cbedda077db2..5ceb2205e4e3 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -9,6 +9,12 @@ struct nft_fib {
9 9
10extern const struct nla_policy nft_fib_policy[]; 10extern const struct nla_policy nft_fib_policy[];
11 11
12static inline bool
13nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
14{
15 return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
16}
17
12int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr); 18int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
13int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 19int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
14 const struct nlattr * const tb[]); 20 const struct nlattr * const tb[]);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 958a24d8fae7..b567e4452a47 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
352 } 352 }
353} 353}
354 354
355static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
356{
357 if (mtu >= 4096)
358 return IB_MTU_4096;
359 else if (mtu >= 2048)
360 return IB_MTU_2048;
361 else if (mtu >= 1024)
362 return IB_MTU_1024;
363 else if (mtu >= 512)
364 return IB_MTU_512;
365 else
366 return IB_MTU_256;
367}
368
355enum ib_port_state { 369enum ib_port_state {
356 IB_PORT_NOP = 0, 370 IB_PORT_NOP = 0,
357 IB_PORT_DOWN = 1, 371 IB_PORT_DOWN = 1,
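
ib_mtu_int_to_enum() rounds an arbitrary byte MTU down to the nearest IB enum value, flooring at 256; it is the inverse direction of ib_mtu_enum_to_int(). A quick table of what the cascade yields (stand-alone rewrite of the same ladder, returning the byte value instead of the enum):

#include <stdio.h>

static int mtu_int_to_pow2(int mtu)
{
	if (mtu >= 4096) return 4096;
	if (mtu >= 2048) return 2048;
	if (mtu >= 1024) return 1024;
	if (mtu >= 512)  return 512;
	return 256;	/* floor: anything smaller still maps to 256 */
}

int main(void)
{
	int samples[] = { 9000, 4096, 1500, 1024, 700, 100 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%d -> %d\n", samples[i], mtu_int_to_pow2(samples[i]));
	/* 9000->4096, 4096->4096, 1500->1024, 1024->1024, 700->512, 100->256 */
	return 0;
}
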
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index 6902c2a8bd23..4b6b489a8d7c 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -55,17 +55,17 @@ struct mcip_cmd {
55 55
56struct mcip_bcr { 56struct mcip_bcr {
57#ifdef CONFIG_CPU_BIG_ENDIAN 57#ifdef CONFIG_CPU_BIG_ENDIAN
58 unsigned int pad3:8, 58 unsigned int pad4:6, pw_dom:1, pad3:1,
59 idu:1, llm:1, num_cores:6, 59 idu:1, pad2:1, num_cores:6,
60 iocoh:1, gfrc:1, dbg:1, pad2:1, 60 pad:1, gfrc:1, dbg:1, pw:1,
61 msg:1, sem:1, ipi:1, pad:1, 61 msg:1, sem:1, ipi:1, slv:1,
62 ver:8; 62 ver:8;
63#else 63#else
64 unsigned int ver:8, 64 unsigned int ver:8,
65 pad:1, ipi:1, sem:1, msg:1, 65 slv:1, ipi:1, sem:1, msg:1,
66 pad2:1, dbg:1, gfrc:1, iocoh:1, 66 pw:1, dbg:1, gfrc:1, pad:1,
67 num_cores:6, llm:1, idu:1, 67 num_cores:6, pad2:1, idu:1,
68 pad3:8; 68 pad3:1, pw_dom:1, pad4:6;
69#endif 69#endif
70}; 70};
71 71
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 3cbc327801d6..c451eec42a83 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
1665 __u8 audio_out_compensated, 1665 __u8 audio_out_compensated,
1666 __u8 audio_out_delay) 1666 __u8 audio_out_delay)
1667{ 1667{
1668 msg->len = 7; 1668 msg->len = 6;
1669 msg->msg[0] |= 0xf; /* broadcast */ 1669 msg->msg[0] |= 0xf; /* broadcast */
1670 msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY; 1670 msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
1671 msg->msg[2] = phys_addr >> 8; 1671 msg->msg[2] = phys_addr >> 8;
1672 msg->msg[3] = phys_addr & 0xff; 1672 msg->msg[3] = phys_addr & 0xff;
1673 msg->msg[4] = video_latency; 1673 msg->msg[4] = video_latency;
1674 msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated; 1674 msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
1675 msg->msg[6] = audio_out_delay; 1675 if (audio_out_compensated == 3)
1676 msg->msg[msg->len++] = audio_out_delay;
1676} 1677}
1677 1678
1678static inline void cec_ops_report_current_latency(const struct cec_msg *msg, 1679static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
1686 *video_latency = msg->msg[4]; 1687 *video_latency = msg->msg[4];
1687 *low_latency_mode = (msg->msg[5] >> 2) & 1; 1688 *low_latency_mode = (msg->msg[5] >> 2) & 1;
1688 *audio_out_compensated = msg->msg[5] & 3; 1689 *audio_out_compensated = msg->msg[5] & 3;
1689 *audio_out_delay = msg->msg[6]; 1690 if (*audio_out_compensated == 3 && msg->len >= 7)
1691 *audio_out_delay = msg->msg[6];
1692 else
1693 *audio_out_delay = 0;
1690} 1694}
1691 1695
1692static inline void cec_msg_request_current_latency(struct cec_msg *msg, 1696static inline void cec_msg_request_current_latency(struct cec_msg *msg,
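This fix makes <Report Current Latency> variable-length: the Audio Out Delay operand is only present when Audio Out Compensated equals 3 (partially compensated), so msg->len now grows conditionally and the parser tolerates 6-byte messages. A self-contained sketch of the encoder, with a simplified struct and the opcode value assumed from the uapi header:

    #include <stdio.h>
    #include <string.h>

    #define CEC_MSG_REPORT_CURRENT_LATENCY 0xa8  /* assumed uapi value */

    struct cec_msg { unsigned int len; unsigned char msg[16]; };

    static void report_current_latency(struct cec_msg *msg, unsigned short pa,
                                       unsigned char video_latency,
                                       unsigned char low_latency_mode,
                                       unsigned char audio_out_compensated,
                                       unsigned char audio_out_delay)
    {
            memset(msg, 0, sizeof(*msg));
            msg->len = 6;
            msg->msg[0] |= 0xf;                      /* broadcast */
            msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
            msg->msg[2] = pa >> 8;
            msg->msg[3] = pa & 0xff;
            msg->msg[4] = video_latency;
            msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
            if (audio_out_compensated == 3)          /* "partial": delay follows */
                    msg->msg[msg->len++] = audio_out_delay;
    }

    int main(void)
    {
            struct cec_msg m;
            report_current_latency(&m, 0x1000, 4, 0, 1, 10);
            printf("len=%u\n", m.len);               /* 6: no delay operand */
            report_current_latency(&m, 0x1000, 4, 0, 3, 10);
            printf("len=%u\n", m.len);               /* 7: delay appended   */
            return 0;
    }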
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index f0db7788f887..3dc91a46e8b8 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices {
1384 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, 1384 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
1385 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, 1385 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
1386 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, 1386 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
1387 ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
1388 ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
1387 1389
1388 1390
1389 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit 1391 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices {
1393 */ 1395 */
1394 1396
1395 __ETHTOOL_LINK_MODE_LAST 1397 __ETHTOOL_LINK_MODE_LAST
1396 = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 1398 = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
1397}; 1399};
1398 1400
1399#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ 1401#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
index 8be21e02387d..d0b5fa91ff54 100644
--- a/include/uapi/linux/netfilter/nf_log.h
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -9,4 +9,6 @@
9#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */ 9#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
10#define NF_LOG_MASK 0x2f 10#define NF_LOG_MASK 0x2f
11 11
12#define NF_LOG_PREFIXLEN 128
13
12#endif /* _NETFILTER_NF_LOG_H */ 14#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 881d49e94569..e3f27e09eb2b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -235,7 +235,7 @@ enum nft_rule_compat_flags {
235/** 235/**
236 * enum nft_rule_compat_attributes - nf_tables rule compat attributes 236 * enum nft_rule_compat_attributes - nf_tables rule compat attributes
237 * 237 *
238 * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32) 238 * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
239 * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32) 239 * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
240 */ 240 */
241enum nft_rule_compat_attributes { 241enum nft_rule_compat_attributes {
@@ -499,7 +499,7 @@ enum nft_bitwise_attributes {
499 * enum nft_byteorder_ops - nf_tables byteorder operators 499 * enum nft_byteorder_ops - nf_tables byteorder operators
500 * 500 *
501 * @NFT_BYTEORDER_NTOH: network to host operator 501 * @NFT_BYTEORDER_NTOH: network to host operator
502 * @NFT_BYTEORDER_HTON: host to network opertaor 502 * @NFT_BYTEORDER_HTON: host to network operator
503 */ 503 */
504enum nft_byteorder_ops { 504enum nft_byteorder_ops {
505 NFT_BYTEORDER_NTOH, 505 NFT_BYTEORDER_NTOH,
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 82bdf5626859..bb68cb1b04ed 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -16,3 +16,4 @@ header-y += nes-abi.h
16header-y += ocrdma-abi.h 16header-y += ocrdma-abi.h
17header-y += hns-abi.h 17header-y += hns-abi.h
18header-y += vmw_pvrdma-abi.h 18header-y += vmw_pvrdma-abi.h
19header-y += qedr-abi.h
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
index 48a19bda071b..d24eee12128f 100644
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -30,7 +30,7 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32#ifndef CXGB3_ABI_USER_H 32#ifndef CXGB3_ABI_USER_H
33#define CXBG3_ABI_USER_H 33#define CXGB3_ABI_USER_H
34 34
35#include <linux/types.h> 35#include <linux/types.h>
36 36
diff --git a/init/Kconfig b/init/Kconfig
index e1a937348a3e..4dd8bd232a1d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1987,6 +1987,10 @@ config MODVERSIONS
1987 make them incompatible with the kernel you are running. If 1987 make them incompatible with the kernel you are running. If
1988 unsure, say N. 1988 unsure, say N.
1989 1989
1990config MODULE_REL_CRCS
1991 bool
1992 depends on MODVERSIONS
1993
1990config MODULE_SRCVERSION_ALL 1994config MODULE_SRCVERSION_ALL
1991 bool "Source checksum for all modules" 1995 bool "Source checksum for all modules"
1992 help 1996 help
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 229a5d5df977..3d55d95dcf49 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -11,7 +11,6 @@
11 */ 11 */
12#include <linux/bpf.h> 12#include <linux/bpf.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/vmalloc.h>
15#include <linux/slab.h> 14#include <linux/slab.h>
16#include <linux/mm.h> 15#include <linux/mm.h>
17#include <linux/filter.h> 16#include <linux/filter.h>
@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
74 if (array_size >= U32_MAX - PAGE_SIZE) 73 if (array_size >= U32_MAX - PAGE_SIZE)
75 return ERR_PTR(-ENOMEM); 74 return ERR_PTR(-ENOMEM);
76 75
77
78 /* allocate all map elements and zero-initialize them */ 76 /* allocate all map elements and zero-initialize them */
79 array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); 77 array = bpf_map_area_alloc(array_size);
80 if (!array) { 78 if (!array)
81 array = vzalloc(array_size); 79 return ERR_PTR(-ENOMEM);
82 if (!array)
83 return ERR_PTR(-ENOMEM);
84 }
85 80
86 /* copy mandatory map attributes */ 81 /* copy mandatory map attributes */
87 array->map.map_type = attr->map_type; 82 array->map.map_type = attr->map_type;
@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
97 92
98 if (array_size >= U32_MAX - PAGE_SIZE || 93 if (array_size >= U32_MAX - PAGE_SIZE ||
99 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { 94 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
100 kvfree(array); 95 bpf_map_area_free(array);
101 return ERR_PTR(-ENOMEM); 96 return ERR_PTR(-ENOMEM);
102 } 97 }
103out: 98out:
@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
262 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 257 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
263 bpf_array_free_percpu(array); 258 bpf_array_free_percpu(array);
264 259
265 kvfree(array); 260 bpf_map_area_free(array);
266} 261}
267 262
268static const struct bpf_map_ops array_ops = { 263static const struct bpf_map_ops array_ops = {
@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
319 /* make sure it's empty */ 314 /* make sure it's empty */
320 for (i = 0; i < array->map.max_entries; i++) 315 for (i = 0; i < array->map.max_entries; i++)
321 BUG_ON(array->ptrs[i] != NULL); 316 BUG_ON(array->ptrs[i] != NULL);
322 kvfree(array); 317
318 bpf_map_area_free(array);
323} 319}
324 320
325static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) 321static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3f2bb58952d8..a753bbe7df0a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,7 +13,6 @@
13#include <linux/bpf.h> 13#include <linux/bpf.h>
14#include <linux/jhash.h> 14#include <linux/jhash.h>
15#include <linux/filter.h> 15#include <linux/filter.h>
16#include <linux/vmalloc.h>
17#include "percpu_freelist.h" 16#include "percpu_freelist.h"
18#include "bpf_lru_list.h" 17#include "bpf_lru_list.h"
19 18
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
103 free_percpu(pptr); 102 free_percpu(pptr);
104 } 103 }
105free_elems: 104free_elems:
106 vfree(htab->elems); 105 bpf_map_area_free(htab->elems);
107} 106}
108 107
109static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, 108static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
125{ 124{
126 int err = -ENOMEM, i; 125 int err = -ENOMEM, i;
127 126
128 htab->elems = vzalloc(htab->elem_size * htab->map.max_entries); 127 htab->elems = bpf_map_area_alloc(htab->elem_size *
128 htab->map.max_entries);
129 if (!htab->elems) 129 if (!htab->elems)
130 return -ENOMEM; 130 return -ENOMEM;
131 131
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
320 goto free_htab; 320 goto free_htab;
321 321
322 err = -ENOMEM; 322 err = -ENOMEM;
323 htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket), 323 htab->buckets = bpf_map_area_alloc(htab->n_buckets *
324 GFP_USER | __GFP_NOWARN); 324 sizeof(struct bucket));
325 325 if (!htab->buckets)
326 if (!htab->buckets) { 326 goto free_htab;
327 htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
328 if (!htab->buckets)
329 goto free_htab;
330 }
331 327
332 for (i = 0; i < htab->n_buckets; i++) { 328 for (i = 0; i < htab->n_buckets; i++) {
333 INIT_HLIST_HEAD(&htab->buckets[i].head); 329 INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
354free_extra_elems: 350free_extra_elems:
355 free_percpu(htab->extra_elems); 351 free_percpu(htab->extra_elems);
356free_buckets: 352free_buckets:
357 kvfree(htab->buckets); 353 bpf_map_area_free(htab->buckets);
358free_htab: 354free_htab:
359 kfree(htab); 355 kfree(htab);
360 return ERR_PTR(err); 356 return ERR_PTR(err);
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
1014 prealloc_destroy(htab); 1010 prealloc_destroy(htab);
1015 1011
1016 free_percpu(htab->extra_elems); 1012 free_percpu(htab->extra_elems);
1017 kvfree(htab->buckets); 1013 bpf_map_area_free(htab->buckets);
1018 kfree(htab); 1014 kfree(htab);
1019} 1015}
1020 1016
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 732ae16d12b7..be8519148c25 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -7,7 +7,6 @@
7#include <linux/bpf.h> 7#include <linux/bpf.h>
8#include <linux/jhash.h> 8#include <linux/jhash.h>
9#include <linux/filter.h> 9#include <linux/filter.h>
10#include <linux/vmalloc.h>
11#include <linux/stacktrace.h> 10#include <linux/stacktrace.h>
12#include <linux/perf_event.h> 11#include <linux/perf_event.h>
13#include "percpu_freelist.h" 12#include "percpu_freelist.h"
@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
32 u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; 31 u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
33 int err; 32 int err;
34 33
35 smap->elems = vzalloc(elem_size * smap->map.max_entries); 34 smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
36 if (!smap->elems) 35 if (!smap->elems)
37 return -ENOMEM; 36 return -ENOMEM;
38 37
@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
45 return 0; 44 return 0;
46 45
47free_elems: 46free_elems:
48 vfree(smap->elems); 47 bpf_map_area_free(smap->elems);
49 return err; 48 return err;
50} 49}
51 50
@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
76 if (cost >= U32_MAX - PAGE_SIZE) 75 if (cost >= U32_MAX - PAGE_SIZE)
77 return ERR_PTR(-E2BIG); 76 return ERR_PTR(-E2BIG);
78 77
79 smap = kzalloc(cost, GFP_USER | __GFP_NOWARN); 78 smap = bpf_map_area_alloc(cost);
80 if (!smap) { 79 if (!smap)
81 smap = vzalloc(cost); 80 return ERR_PTR(-ENOMEM);
82 if (!smap)
83 return ERR_PTR(-ENOMEM);
84 }
85 81
86 err = -E2BIG; 82 err = -E2BIG;
87 cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); 83 cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
112put_buffers: 108put_buffers:
113 put_callchain_buffers(); 109 put_callchain_buffers();
114free_smap: 110free_smap:
115 kvfree(smap); 111 bpf_map_area_free(smap);
116 return ERR_PTR(err); 112 return ERR_PTR(err);
117} 113}
118 114
@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
262 /* wait for bpf programs to complete before freeing stack map */ 258 /* wait for bpf programs to complete before freeing stack map */
263 synchronize_rcu(); 259 synchronize_rcu();
264 260
265 vfree(smap->elems); 261 bpf_map_area_free(smap->elems);
266 pcpu_freelist_destroy(&smap->freelist); 262 pcpu_freelist_destroy(&smap->freelist);
267 kvfree(smap); 263 bpf_map_area_free(smap);
268 put_callchain_buffers(); 264 put_callchain_buffers();
269} 265}
270 266
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 1d6b29e4e2c3..19b6129eab23 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -12,6 +12,8 @@
12#include <linux/bpf.h> 12#include <linux/bpf.h>
13#include <linux/syscalls.h> 13#include <linux/syscalls.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/vmalloc.h>
16#include <linux/mmzone.h>
15#include <linux/anon_inodes.h> 17#include <linux/anon_inodes.h>
16#include <linux/file.h> 18#include <linux/file.h>
17#include <linux/license.h> 19#include <linux/license.h>
@@ -49,6 +51,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
49 list_add(&tl->list_node, &bpf_map_types); 51 list_add(&tl->list_node, &bpf_map_types);
50} 52}
51 53
54void *bpf_map_area_alloc(size_t size)
55{
56 /* We definitely need __GFP_NORETRY, so OOM killer doesn't
57 * trigger under memory pressure as we really just want to
58 * fail instead.
59 */
60 const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
61 void *area;
62
63 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
64 area = kmalloc(size, GFP_USER | flags);
65 if (area != NULL)
66 return area;
67 }
68
69 return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
70 PAGE_KERNEL);
71}
72
73void bpf_map_area_free(void *area)
74{
75 kvfree(area);
76}
77
52int bpf_map_precharge_memlock(u32 pages) 78int bpf_map_precharge_memlock(u32 pages)
53{ 79{
54 struct user_struct *user = get_current_user(); 80 struct user_struct *user = get_current_user();
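bpf_map_area_alloc() replaces the four open-coded kmalloc-then-vmalloc fallbacks in arraymap, hashtab and stackmap above: requests up to PAGE_ALLOC_COSTLY_ORDER pages try the physically contiguous allocator with __GFP_NORETRY so the OOM killer is never invoked, and anything larger (or a failed small attempt) goes through vmalloc. A userspace sketch of the same policy, with calloc() and mmap() standing in for kmalloc() and __vmalloc():

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALLOC_COSTLY_ORDER 3   /* same threshold the kernel uses */

    static void *vmalloc_like(size_t size)
    {
            void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            return p == MAP_FAILED ? NULL : p;
    }

    static void *map_area_alloc(size_t size)
    {
            if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
                    void *area = calloc(1, size);   /* zeroed, like __GFP_ZERO */
                    if (area)
                            return area;
            }
            return vmalloc_like(size);              /* already zero-filled */
    }

    int main(void)
    {
            void *small = map_area_alloc(8 * 1024);     /* contiguous path */
            void *large = map_area_alloc(1024 * 1024);  /* fallback path   */
            printf("%p %p\n", small, large);
            /* The kernel frees either result with kvfree(); here the two
             * paths differ, so a real userspace version would have to
             * remember which allocator was used. */
            return 0;
    }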
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2ee9ec3051b2..688dd02af985 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5221,6 +5221,11 @@ err_free_css:
5221 return ERR_PTR(err); 5221 return ERR_PTR(err);
5222} 5222}
5223 5223
5224/*
5225 * The returned cgroup is fully initialized including its control mask, but
5226 * it isn't associated with its kernfs_node and doesn't have the control
5227 * mask applied.
5228 */
5224static struct cgroup *cgroup_create(struct cgroup *parent) 5229static struct cgroup *cgroup_create(struct cgroup *parent)
5225{ 5230{
5226 struct cgroup_root *root = parent->root; 5231 struct cgroup_root *root = parent->root;
@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
5288 5293
5289 cgroup_propagate_control(cgrp); 5294 cgroup_propagate_control(cgrp);
5290 5295
5291 /* @cgrp doesn't have dir yet so the following will only create csses */
5292 ret = cgroup_apply_control_enable(cgrp);
5293 if (ret)
5294 goto out_destroy;
5295
5296 return cgrp; 5296 return cgrp;
5297 5297
5298out_cancel_ref: 5298out_cancel_ref:
@@ -5300,9 +5300,6 @@ out_cancel_ref:
5300out_free_cgrp: 5300out_free_cgrp:
5301 kfree(cgrp); 5301 kfree(cgrp);
5302 return ERR_PTR(ret); 5302 return ERR_PTR(ret);
5303out_destroy:
5304 cgroup_destroy_locked(cgrp);
5305 return ERR_PTR(ret);
5306} 5303}
5307 5304
5308static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, 5305static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 110b38a58493..e5aaa806702d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1469static void 1469static void
1470list_add_event(struct perf_event *event, struct perf_event_context *ctx) 1470list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1471{ 1471{
1472
1473 lockdep_assert_held(&ctx->lock); 1472 lockdep_assert_held(&ctx->lock);
1474 1473
1475 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); 1474 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
1624{ 1623{
1625 struct perf_event *group_leader = event->group_leader, *pos; 1624 struct perf_event *group_leader = event->group_leader, *pos;
1626 1625
1626 lockdep_assert_held(&event->ctx->lock);
1627
1627 /* 1628 /*
1628 * We can have double attach due to group movement in perf_event_open. 1629 * We can have double attach due to group movement in perf_event_open.
1629 */ 1630 */
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
1697 struct perf_event *sibling, *tmp; 1698 struct perf_event *sibling, *tmp;
1698 struct list_head *list = NULL; 1699 struct list_head *list = NULL;
1699 1700
1701 lockdep_assert_held(&event->ctx->lock);
1702
1700 /* 1703 /*
1701 * We can have double detach due to exit/hot-unplug + close. 1704 * We can have double detach due to exit/hot-unplug + close.
1702 */ 1705 */
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
1895 */ 1898 */
1896static void perf_remove_from_context(struct perf_event *event, unsigned long flags) 1899static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
1897{ 1900{
1898 lockdep_assert_held(&event->ctx->mutex); 1901 struct perf_event_context *ctx = event->ctx;
1902
1903 lockdep_assert_held(&ctx->mutex);
1899 1904
1900 event_function_call(event, __perf_remove_from_context, (void *)flags); 1905 event_function_call(event, __perf_remove_from_context, (void *)flags);
1906
1907 /*
1908 * The above event_function_call() can NO-OP when it hits
1909 * TASK_TOMBSTONE. In that case we must already have been detached
1910 * from the context (by perf_event_exit_event()) but the grouping
1911 * might still be in-tact.
 1911 * might still be intact.
1912 */
1913 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1914 if ((flags & DETACH_GROUP) &&
1915 (event->attach_state & PERF_ATTACH_GROUP)) {
1916 /*
1917 * Since in that case we cannot possibly be scheduled, simply
1918 * detach now.
1919 */
1920 raw_spin_lock_irq(&ctx->lock);
1921 perf_group_detach(event);
1922 raw_spin_unlock_irq(&ctx->lock);
1923 }
1901} 1924}
1902 1925
1903/* 1926/*
@@ -6609,6 +6632,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6609 char *buf = NULL; 6632 char *buf = NULL;
6610 char *name; 6633 char *name;
6611 6634
6635 if (vma->vm_flags & VM_READ)
6636 prot |= PROT_READ;
6637 if (vma->vm_flags & VM_WRITE)
6638 prot |= PROT_WRITE;
6639 if (vma->vm_flags & VM_EXEC)
6640 prot |= PROT_EXEC;
6641
6642 if (vma->vm_flags & VM_MAYSHARE)
6643 flags = MAP_SHARED;
6644 else
6645 flags = MAP_PRIVATE;
6646
6647 if (vma->vm_flags & VM_DENYWRITE)
6648 flags |= MAP_DENYWRITE;
6649 if (vma->vm_flags & VM_MAYEXEC)
6650 flags |= MAP_EXECUTABLE;
6651 if (vma->vm_flags & VM_LOCKED)
6652 flags |= MAP_LOCKED;
6653 if (vma->vm_flags & VM_HUGETLB)
6654 flags |= MAP_HUGETLB;
6655
6612 if (file) { 6656 if (file) {
6613 struct inode *inode; 6657 struct inode *inode;
6614 dev_t dev; 6658 dev_t dev;
@@ -6635,27 +6679,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6635 maj = MAJOR(dev); 6679 maj = MAJOR(dev);
6636 min = MINOR(dev); 6680 min = MINOR(dev);
6637 6681
6638 if (vma->vm_flags & VM_READ)
6639 prot |= PROT_READ;
6640 if (vma->vm_flags & VM_WRITE)
6641 prot |= PROT_WRITE;
6642 if (vma->vm_flags & VM_EXEC)
6643 prot |= PROT_EXEC;
6644
6645 if (vma->vm_flags & VM_MAYSHARE)
6646 flags = MAP_SHARED;
6647 else
6648 flags = MAP_PRIVATE;
6649
6650 if (vma->vm_flags & VM_DENYWRITE)
6651 flags |= MAP_DENYWRITE;
6652 if (vma->vm_flags & VM_MAYEXEC)
6653 flags |= MAP_EXECUTABLE;
6654 if (vma->vm_flags & VM_LOCKED)
6655 flags |= MAP_LOCKED;
6656 if (vma->vm_flags & VM_HUGETLB)
6657 flags |= MAP_HUGETLB;
6658
6659 goto got_name; 6682 goto got_name;
6660 } else { 6683 } else {
6661 if (vma->vm_ops && vma->vm_ops->name) { 6684 if (vma->vm_ops && vma->vm_ops->name) {
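Moving the vm_flags translation in front of the `if (file)` check means anonymous mappings now get prot and flags recorded in the mmap event, not just file-backed ones. The translation itself is mechanical, as this standalone sketch shows (VM_* values are simplified stand-ins; PROT_*/MAP_* come from sys/mman.h):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>   /* PROT_*, MAP_SHARED, MAP_PRIVATE, MAP_LOCKED */

    #define VM_READ     0x1
    #define VM_WRITE    0x2
    #define VM_EXEC     0x4
    #define VM_MAYSHARE 0x8
    #define VM_LOCKED   0x10

    static void decode(unsigned long vm_flags, int *prot, int *flags)
    {
            *prot = 0;
            if (vm_flags & VM_READ)  *prot |= PROT_READ;
            if (vm_flags & VM_WRITE) *prot |= PROT_WRITE;
            if (vm_flags & VM_EXEC)  *prot |= PROT_EXEC;

            *flags = (vm_flags & VM_MAYSHARE) ? MAP_SHARED : MAP_PRIVATE;
            if (vm_flags & VM_LOCKED) *flags |= MAP_LOCKED;
    }

    int main(void)
    {
            int prot, flags;
            decode(VM_READ | VM_WRITE | VM_MAYSHARE, &prot, &flags);
            printf("prot=%#x flags=%#x\n", prot, flags);
            return 0;
    }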
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 8c0a0ae43521..b59e6768c5e9 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
1346} 1346}
1347EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); 1347EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
1348 1348
1349static void __irq_domain_activate_irq(struct irq_data *irq_data)
1350{
1351 if (irq_data && irq_data->domain) {
1352 struct irq_domain *domain = irq_data->domain;
1353
1354 if (irq_data->parent_data)
1355 __irq_domain_activate_irq(irq_data->parent_data);
1356 if (domain->ops->activate)
1357 domain->ops->activate(domain, irq_data);
1358 }
1359}
1360
1361static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1362{
1363 if (irq_data && irq_data->domain) {
1364 struct irq_domain *domain = irq_data->domain;
1365
1366 if (domain->ops->deactivate)
1367 domain->ops->deactivate(domain, irq_data);
1368 if (irq_data->parent_data)
1369 __irq_domain_deactivate_irq(irq_data->parent_data);
1370 }
1371}
1372
1349/** 1373/**
1350 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate 1374 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1351 * interrupt 1375 * interrupt
@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
1356 */ 1380 */
1357void irq_domain_activate_irq(struct irq_data *irq_data) 1381void irq_domain_activate_irq(struct irq_data *irq_data)
1358{ 1382{
1359 if (irq_data && irq_data->domain) { 1383 if (!irqd_is_activated(irq_data)) {
1360 struct irq_domain *domain = irq_data->domain; 1384 __irq_domain_activate_irq(irq_data);
1361 1385 irqd_set_activated(irq_data);
1362 if (irq_data->parent_data)
1363 irq_domain_activate_irq(irq_data->parent_data);
1364 if (domain->ops->activate)
1365 domain->ops->activate(domain, irq_data);
1366 } 1386 }
1367} 1387}
1368 1388
@@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
1376 */ 1396 */
1377void irq_domain_deactivate_irq(struct irq_data *irq_data) 1397void irq_domain_deactivate_irq(struct irq_data *irq_data)
1378{ 1398{
1379 if (irq_data && irq_data->domain) { 1399 if (irqd_is_activated(irq_data)) {
1380 struct irq_domain *domain = irq_data->domain; 1400 __irq_domain_deactivate_irq(irq_data);
1381 1401 irqd_clr_activated(irq_data);
1382 if (domain->ops->deactivate)
1383 domain->ops->deactivate(domain, irq_data);
1384 if (irq_data->parent_data)
1385 irq_domain_deactivate_irq(irq_data->parent_data);
1386 } 1402 }
1387} 1403}
1388 1404
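The rework splits the parent-walking recursion into __irq_domain_activate_irq()/__irq_domain_deactivate_irq() and guards the public entry points with an activated flag, so repeated calls become idempotent instead of invoking domain->ops->activate twice. A toy model of the resulting behaviour:

    #include <stdbool.h>
    #include <stdio.h>

    struct irq_data {
            const char *name;
            struct irq_data *parent;
            bool activated;
    };

    static void __activate(struct irq_data *d)
    {
            if (!d)
                    return;
            __activate(d->parent);               /* parents first */
            printf("activate %s\n", d->name);    /* stands in for ops->activate */
    }

    static void activate_irq(struct irq_data *d)
    {
            if (!d->activated) {
                    __activate(d);
                    d->activated = true;
            }
    }

    int main(void)
    {
            struct irq_data parent = { "parent", NULL,    false };
            struct irq_data child  = { "child",  &parent, false };

            activate_irq(&child);
            activate_irq(&child);   /* no output: already activated */
            return 0;
    }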
diff --git a/kernel/module.c b/kernel/module.c
index 38d4270925d4..3d8f126208e3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
389extern const struct kernel_symbol __stop___ksymtab_gpl[]; 389extern const struct kernel_symbol __stop___ksymtab_gpl[];
390extern const struct kernel_symbol __start___ksymtab_gpl_future[]; 390extern const struct kernel_symbol __start___ksymtab_gpl_future[];
391extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; 391extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
392extern const unsigned long __start___kcrctab[]; 392extern const s32 __start___kcrctab[];
393extern const unsigned long __start___kcrctab_gpl[]; 393extern const s32 __start___kcrctab_gpl[];
394extern const unsigned long __start___kcrctab_gpl_future[]; 394extern const s32 __start___kcrctab_gpl_future[];
395#ifdef CONFIG_UNUSED_SYMBOLS 395#ifdef CONFIG_UNUSED_SYMBOLS
396extern const struct kernel_symbol __start___ksymtab_unused[]; 396extern const struct kernel_symbol __start___ksymtab_unused[];
397extern const struct kernel_symbol __stop___ksymtab_unused[]; 397extern const struct kernel_symbol __stop___ksymtab_unused[];
398extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; 398extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
399extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; 399extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
400extern const unsigned long __start___kcrctab_unused[]; 400extern const s32 __start___kcrctab_unused[];
401extern const unsigned long __start___kcrctab_unused_gpl[]; 401extern const s32 __start___kcrctab_unused_gpl[];
402#endif 402#endif
403 403
404#ifndef CONFIG_MODVERSIONS 404#ifndef CONFIG_MODVERSIONS
@@ -497,7 +497,7 @@ struct find_symbol_arg {
497 497
498 /* Output */ 498 /* Output */
499 struct module *owner; 499 struct module *owner;
500 const unsigned long *crc; 500 const s32 *crc;
501 const struct kernel_symbol *sym; 501 const struct kernel_symbol *sym;
502}; 502};
503 503
@@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms,
563 * (optional) module which owns it. Needs preempt disabled or module_mutex. */ 563 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
564const struct kernel_symbol *find_symbol(const char *name, 564const struct kernel_symbol *find_symbol(const char *name,
565 struct module **owner, 565 struct module **owner,
566 const unsigned long **crc, 566 const s32 **crc,
567 bool gplok, 567 bool gplok,
568 bool warn) 568 bool warn)
569{ 569{
@@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason)
1249} 1249}
1250 1250
1251#ifdef CONFIG_MODVERSIONS 1251#ifdef CONFIG_MODVERSIONS
1252/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */ 1252
1253static unsigned long maybe_relocated(unsigned long crc, 1253static u32 resolve_rel_crc(const s32 *crc)
1254 const struct module *crc_owner)
1255{ 1254{
1256#ifdef ARCH_RELOCATES_KCRCTAB 1255 return *(u32 *)((void *)crc + *crc);
1257 if (crc_owner == NULL)
1258 return crc - (unsigned long)reloc_start;
1259#endif
1260 return crc;
1261} 1256}
1262 1257
1263static int check_version(Elf_Shdr *sechdrs, 1258static int check_version(Elf_Shdr *sechdrs,
1264 unsigned int versindex, 1259 unsigned int versindex,
1265 const char *symname, 1260 const char *symname,
1266 struct module *mod, 1261 struct module *mod,
1267 const unsigned long *crc, 1262 const s32 *crc)
1268 const struct module *crc_owner)
1269{ 1263{
1270 unsigned int i, num_versions; 1264 unsigned int i, num_versions;
1271 struct modversion_info *versions; 1265 struct modversion_info *versions;
@@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs,
1283 / sizeof(struct modversion_info); 1277 / sizeof(struct modversion_info);
1284 1278
1285 for (i = 0; i < num_versions; i++) { 1279 for (i = 0; i < num_versions; i++) {
1280 u32 crcval;
1281
1286 if (strcmp(versions[i].name, symname) != 0) 1282 if (strcmp(versions[i].name, symname) != 0)
1287 continue; 1283 continue;
1288 1284
1289 if (versions[i].crc == maybe_relocated(*crc, crc_owner)) 1285 if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
1286 crcval = resolve_rel_crc(crc);
1287 else
1288 crcval = *crc;
1289 if (versions[i].crc == crcval)
1290 return 1; 1290 return 1;
1291 pr_debug("Found checksum %lX vs module %lX\n", 1291 pr_debug("Found checksum %X vs module %lX\n",
1292 maybe_relocated(*crc, crc_owner), versions[i].crc); 1292 crcval, versions[i].crc);
1293 goto bad_version; 1293 goto bad_version;
1294 } 1294 }
1295 1295
@@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1307 unsigned int versindex, 1307 unsigned int versindex,
1308 struct module *mod) 1308 struct module *mod)
1309{ 1309{
1310 const unsigned long *crc; 1310 const s32 *crc;
1311 1311
1312 /* 1312 /*
1313 * Since this should be found in kernel (which can't be removed), no 1313 * Since this should be found in kernel (which can't be removed), no
@@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1321 } 1321 }
1322 preempt_enable(); 1322 preempt_enable();
1323 return check_version(sechdrs, versindex, 1323 return check_version(sechdrs, versindex,
1324 VMLINUX_SYMBOL_STR(module_layout), mod, crc, 1324 VMLINUX_SYMBOL_STR(module_layout), mod, crc);
1325 NULL);
1326} 1325}
1327 1326
1328/* First part is kernel version, which we ignore if module has crcs. */ 1327/* First part is kernel version, which we ignore if module has crcs. */
@@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs,
1340 unsigned int versindex, 1339 unsigned int versindex,
1341 const char *symname, 1340 const char *symname,
1342 struct module *mod, 1341 struct module *mod,
1343 const unsigned long *crc, 1342 const s32 *crc)
1344 const struct module *crc_owner)
1345{ 1343{
1346 return 1; 1344 return 1;
1347} 1345}
@@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
1368{ 1366{
1369 struct module *owner; 1367 struct module *owner;
1370 const struct kernel_symbol *sym; 1368 const struct kernel_symbol *sym;
1371 const unsigned long *crc; 1369 const s32 *crc;
1372 int err; 1370 int err;
1373 1371
1374 /* 1372 /*
@@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
1383 if (!sym) 1381 if (!sym)
1384 goto unlock; 1382 goto unlock;
1385 1383
1386 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc, 1384 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) {
1387 owner)) {
1388 sym = ERR_PTR(-EINVAL); 1385 sym = ERR_PTR(-EINVAL);
1389 goto getname; 1386 goto getname;
1390 } 1387 }
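With CONFIG_MODULE_REL_CRCS each kcrctab entry becomes a signed 32-bit offset from the entry itself to the real CRC (hence the s32 type change), which keeps the table position-independent and removes the old maybe_relocated() special case. A small demonstration of how resolve_rel_crc() follows such a place-relative reference, assuming as the kernel does that the distance fits in 32 bits:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t resolve_rel_crc(const int32_t *crc)
    {
            return *(uint32_t *)((char *)crc + *crc);
    }

    int main(void)
    {
            static uint32_t real_crc = 0xdeadbeef;
            static int32_t entry;

            /* Store the distance from the entry to the CRC it refers to. */
            entry = (int32_t)((char *)&real_crc - (char *)&entry);

            printf("%#x\n", resolve_rel_crc(&entry));   /* 0xdeadbeef */
            return 0;
    }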
diff --git a/kernel/panic.c b/kernel/panic.c
index 901c4fb46002..08aa88dde7de 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
249 * Delay timeout seconds before rebooting the machine. 249 * Delay timeout seconds before rebooting the machine.
250 * We can't use the "normal" timers since we just panicked. 250 * We can't use the "normal" timers since we just panicked.
251 */ 251 */
252 pr_emerg("Rebooting in %d seconds..", panic_timeout); 252 pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
253 253
254 for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { 254 for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
255 touch_nmi_watchdog(); 255 touch_nmi_watchdog();
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index f67ceb7768b8..15e6baef5c73 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
46const char *mem_sleep_states[PM_SUSPEND_MAX]; 46const char *mem_sleep_states[PM_SUSPEND_MAX];
47 47
48suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE; 48suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
49suspend_state_t mem_sleep_default = PM_SUSPEND_MAX; 49static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
50 50
51unsigned int pm_suspend_global_flags; 51unsigned int pm_suspend_global_flags;
52EXPORT_SYMBOL_GPL(pm_suspend_global_flags); 52EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
168 } 168 }
169 if (valid_state(PM_SUSPEND_MEM)) { 169 if (valid_state(PM_SUSPEND_MEM)) {
170 mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM]; 170 mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
171 if (mem_sleep_default >= PM_SUSPEND_MEM) 171 if (mem_sleep_default == PM_SUSPEND_MEM)
172 mem_sleep_current = PM_SUSPEND_MEM; 172 mem_sleep_current = PM_SUSPEND_MEM;
173 } 173 }
174 174
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8dbaec0e4f7f..1aea594a54db 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
2475 break; 2475 break;
2476 if (neg) 2476 if (neg)
2477 continue; 2477 continue;
2478 val = convmul * val / convdiv;
2478 if ((min && val < *min) || (max && val > *max)) 2479 if ((min && val < *min) || (max && val > *max))
2479 continue; 2480 continue;
2480 *i = val; 2481 *i = val;
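The added line converts the parsed value into internal units before the bounds check; previously proc_doulongvec_ms_jiffies_minmax() compared a milliseconds value against limits expressed in jiffies. A sketch of the corrected order, assuming convmul = HZ and convdiv = 1000 as in the ms-to-jiffies caller (HZ taken as 250 here just for the arithmetic):

    #include <stdio.h>

    #define HZ 250

    int main(void)
    {
            unsigned long ms = 40, min = 5, max = 100;   /* bounds in jiffies  */
            unsigned long val = ms * HZ / 1000;          /* 40ms -> 10 jiffies */

            printf("val=%lu in-range=%d\n", val, val >= min && val <= max);
            return 0;
    }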
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 775569ec50d0..af344a1bf0d0 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -266,7 +266,7 @@ out:
266static struct cpumask save_cpumask; 266static struct cpumask save_cpumask;
267static bool disable_migrate; 267static bool disable_migrate;
268 268
269static void move_to_next_cpu(void) 269static void move_to_next_cpu(bool initmask)
270{ 270{
271 static struct cpumask *current_mask; 271 static struct cpumask *current_mask;
272 int next_cpu; 272 int next_cpu;
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
275 return; 275 return;
276 276
277 /* Just pick the first CPU on first iteration */ 277 /* Just pick the first CPU on first iteration */
278 if (!current_mask) { 278 if (initmask) {
279 current_mask = &save_cpumask; 279 current_mask = &save_cpumask;
280 get_online_cpus(); 280 get_online_cpus();
281 cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); 281 cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
330static int kthread_fn(void *data) 330static int kthread_fn(void *data)
331{ 331{
332 u64 interval; 332 u64 interval;
333 bool initmask = true;
333 334
334 while (!kthread_should_stop()) { 335 while (!kthread_should_stop()) {
335 336
336 move_to_next_cpu(); 337 move_to_next_cpu(initmask);
338 initmask = false;
337 339
338 local_irq_disable(); 340 local_irq_disable();
339 get_sample(); 341 get_sample();
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a133ecd741e4..7ad9e53ad174 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
1372 return a1 + a2 + a3 + a4 + a5 + a6; 1372 return a1 + a2 + a3 + a4 + a5 + a6;
1373} 1373}
1374 1374
1375static struct __init trace_event_file * 1375static __init struct trace_event_file *
1376find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) 1376find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1377{ 1377{
1378 struct trace_event_file *file; 1378 struct trace_event_file *file;
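This one-token move puts __init back among the declaration specifiers, where it annotates the function; between `struct` and its tag it attaches to the struct type instead, where a section attribute is meaningless and compilers warn or error. A compilable illustration, with a plain section attribute standing in for the kernel's __init macro:

    #include <stdio.h>

    #define my_init __attribute__((section("demo.init")))

    struct trace_event_file { int dummy; };

    /* Correct: attribute among the declaration specifiers. */
    static my_init struct trace_event_file *find_file(void)
    {
            static struct trace_event_file f;
            return &f;
    }

    /* "static struct my_init trace_event_file *find_file(void)" would
     * attach the attribute to the struct type and is warned about or
     * rejected, which is what the patch fixes. */

    int main(void)
    {
            printf("%p\n", (void *)find_file());
            return 0;
    }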
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 9d20d5dd298a..4bbd38ec3788 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
128 struct hlist_head *hashent = ucounts_hashentry(ns, uid); 128 struct hlist_head *hashent = ucounts_hashentry(ns, uid);
129 struct ucounts *ucounts, *new; 129 struct ucounts *ucounts, *new;
130 130
131 spin_lock(&ucounts_lock); 131 spin_lock_irq(&ucounts_lock);
132 ucounts = find_ucounts(ns, uid, hashent); 132 ucounts = find_ucounts(ns, uid, hashent);
133 if (!ucounts) { 133 if (!ucounts) {
134 spin_unlock(&ucounts_lock); 134 spin_unlock_irq(&ucounts_lock);
135 135
136 new = kzalloc(sizeof(*new), GFP_KERNEL); 136 new = kzalloc(sizeof(*new), GFP_KERNEL);
137 if (!new) 137 if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
141 new->uid = uid; 141 new->uid = uid;
142 atomic_set(&new->count, 0); 142 atomic_set(&new->count, 0);
143 143
144 spin_lock(&ucounts_lock); 144 spin_lock_irq(&ucounts_lock);
145 ucounts = find_ucounts(ns, uid, hashent); 145 ucounts = find_ucounts(ns, uid, hashent);
146 if (ucounts) { 146 if (ucounts) {
147 kfree(new); 147 kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
152 } 152 }
153 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) 153 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
154 ucounts = NULL; 154 ucounts = NULL;
155 spin_unlock(&ucounts_lock); 155 spin_unlock_irq(&ucounts_lock);
156 return ucounts; 156 return ucounts;
157} 157}
158 158
159static void put_ucounts(struct ucounts *ucounts) 159static void put_ucounts(struct ucounts *ucounts)
160{ 160{
161 unsigned long flags;
162
161 if (atomic_dec_and_test(&ucounts->count)) { 163 if (atomic_dec_and_test(&ucounts->count)) {
162 spin_lock(&ucounts_lock); 164 spin_lock_irqsave(&ucounts_lock, flags);
163 hlist_del_init(&ucounts->node); 165 hlist_del_init(&ucounts->node);
164 spin_unlock(&ucounts_lock); 166 spin_unlock_irqrestore(&ucounts_lock, flags);
165 167
166 kfree(ucounts); 168 kfree(ucounts);
167 } 169 }
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d4b0fa01cae3..63177be0159e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
49#define for_each_watchdog_cpu(cpu) \ 49#define for_each_watchdog_cpu(cpu) \
50 for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) 50 for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
51 51
52atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
53
52/* 54/*
53 * The 'watchdog_running' variable is set to 1 when the watchdog threads 55 * The 'watchdog_running' variable is set to 1 when the watchdog threads
54 * are registered/started and is set to 0 when the watchdog threads are 56 * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
260 int duration; 262 int duration;
261 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; 263 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
262 264
265 if (atomic_read(&watchdog_park_in_progress) != 0)
266 return HRTIMER_NORESTART;
267
263 /* kick the hardlockup detector */ 268 /* kick the hardlockup detector */
264 watchdog_interrupt_count(); 269 watchdog_interrupt_count();
265 270
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
467{ 472{
468 int cpu, ret = 0; 473 int cpu, ret = 0;
469 474
475 atomic_set(&watchdog_park_in_progress, 1);
476
470 for_each_watchdog_cpu(cpu) { 477 for_each_watchdog_cpu(cpu) {
471 ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); 478 ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
472 if (ret) 479 if (ret)
473 break; 480 break;
474 } 481 }
475 482
483 atomic_set(&watchdog_park_in_progress, 0);
484
476 return ret; 485 return ret;
477} 486}
478 487
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 84016c8aee6b..12b8dd640786 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
84 /* Ensure the watchdog never gets throttled */ 84 /* Ensure the watchdog never gets throttled */
85 event->hw.interrupts = 0; 85 event->hw.interrupts = 0;
86 86
87 if (atomic_read(&watchdog_park_in_progress) != 0)
88 return;
89
87 if (__this_cpu_read(watchdog_nmi_touch) == true) { 90 if (__this_cpu_read(watchdog_nmi_touch) == true) {
88 __this_cpu_write(watchdog_nmi_touch, false); 91 __this_cpu_write(watchdog_nmi_touch, false);
89 return; 92 return;
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..a3e14ce92a56 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
144 144
145 return err; 145 return err;
146} 146}
147EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0b92d605fb69..84812a9fb16f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
769 struct radix_tree_node *old = child; 769 struct radix_tree_node *old = child;
770 offset = child->offset + 1; 770 offset = child->offset + 1;
771 child = child->parent; 771 child = child->parent;
772 WARN_ON_ONCE(!list_empty(&node->private_list)); 772 WARN_ON_ONCE(!list_empty(&old->private_list));
773 radix_tree_node_free(old); 773 radix_tree_node_free(old);
774 if (old == entry_to_node(node)) 774 if (old == entry_to_node(node))
775 return; 775 return;
diff --git a/mm/filemap.c b/mm/filemap.c
index b772a33ef640..3f9afded581b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1791,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
1791 1791
1792 cond_resched(); 1792 cond_resched();
1793find_page: 1793find_page:
1794 if (fatal_signal_pending(current)) {
1795 error = -EINTR;
1796 goto out;
1797 }
1798
1794 page = find_get_page(mapping, index); 1799 page = find_get_page(mapping, index);
1795 if (!page) { 1800 if (!page) {
1796 page_cache_sync_readahead(mapping, 1801 page_cache_sync_readahead(mapping,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9a6bd6c8d55a..5f3ad65c85de 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
783 783
784 assert_spin_locked(pmd_lockptr(mm, pmd)); 784 assert_spin_locked(pmd_lockptr(mm, pmd));
785 785
786 /*
787 * When we COW a devmap PMD entry, we split it into PTEs, so we should
788 * not be in this function with `flags & FOLL_COW` set.
789 */
790 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
791
786 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 792 if (flags & FOLL_WRITE && !pmd_write(*pmd))
787 return NULL; 793 return NULL;
788 794
@@ -1128,6 +1134,16 @@ out_unlock:
1128 return ret; 1134 return ret;
1129} 1135}
1130 1136
1137/*
1138 * FOLL_FORCE can write to even unwritable pmd's, but only
1139 * after we've gone through a COW cycle and they are dirty.
1140 */
1141static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1142{
1143 return pmd_write(pmd) ||
1144 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1145}
1146
1131struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 1147struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1132 unsigned long addr, 1148 unsigned long addr,
1133 pmd_t *pmd, 1149 pmd_t *pmd,
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1138 1154
1139 assert_spin_locked(pmd_lockptr(mm, pmd)); 1155 assert_spin_locked(pmd_lockptr(mm, pmd));
1140 1156
1141 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1157 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
1142 goto out; 1158 goto out;
1143 1159
1144 /* Avoid dumping huge zero page */ 1160 /* Avoid dumping huge zero page */
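can_follow_write_pmd() mirrors the existing pte-level rule for FOLL_FORCE: writing through an unwritable huge pmd is only permitted once a COW cycle has happened (FOLL_COW set) and left the pmd dirty. A truth-table sketch with stand-in flag values and a simplified pmd:

    #include <stdbool.h>
    #include <stdio.h>

    #define FOLL_FORCE 0x10     /* stand-ins for the real mm flag values */
    #define FOLL_COW   0x4000

    struct pmd { bool write; bool dirty; };

    static bool can_follow_write_pmd(struct pmd pmd, unsigned int flags)
    {
            return pmd.write ||
                   ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd.dirty);
    }

    int main(void)
    {
            struct pmd ro_dirty = { false, true };

            printf("%d\n", can_follow_write_pmd(ro_dirty, FOLL_FORCE));            /* 0 */
            printf("%d\n", can_follow_write_pmd(ro_dirty, FOLL_FORCE | FOLL_COW)); /* 1 */
            return 0;
    }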
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b82b3e215157..f479365530b6 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,6 +13,7 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/ftrace.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/mm.h> 18#include <linux/mm.h>
18#include <linux/printk.h> 19#include <linux/printk.h>
@@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size,
300 if (likely(!kasan_report_enabled())) 301 if (likely(!kasan_report_enabled()))
301 return; 302 return;
302 303
304 disable_trace_on_warning();
305
303 info.access_addr = (void *)addr; 306 info.access_addr = (void *)addr;
304 info.access_size = size; 307 info.access_size = size;
305 info.is_write = is_write; 308 info.is_write = is_write;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a63a8f832664..b822e158b319 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4353,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
4353 return ret; 4353 return ret;
4354 } 4354 }
4355 4355
4356 /* Try charges one by one with reclaim */ 4356 /* Try charges one by one with reclaim, but do not retry */
4357 while (count--) { 4357 while (count--) {
4358 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); 4358 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4359 if (ret) 4359 if (ret)
4360 return ret; 4360 return ret;
4361 mc.precharge++; 4361 mc.precharge++;
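The one-liner is a classic bit-op inversion: GFP_KERNEL never has __GFP_NORETRY set, so masking it out was a no-op and the precharge path retried after all; OR-ing the flag in implements the stated "do not retry" intent. Illustrated with stand-in mask values, not the real gfp bits:

    #include <stdio.h>

    #define GFP_KERNEL    0x14
    #define __GFP_NORETRY 0x1000

    int main(void)
    {
            printf("%#x\n", GFP_KERNEL & ~__GFP_NORETRY); /* 0x14: unchanged  */
            printf("%#x\n", GFP_KERNEL | __GFP_NORETRY);  /* 0x1014: flag set */
            return 0;
    }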
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e43142c15631..b8c11e063ff0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
1033 node_set_state(node, N_MEMORY); 1033 node_set_state(node, N_MEMORY);
1034} 1034}
1035 1035
1036int zone_can_shift(unsigned long pfn, unsigned long nr_pages, 1036bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
1037 enum zone_type target) 1037 enum zone_type target, int *zone_shift)
1038{ 1038{
1039 struct zone *zone = page_zone(pfn_to_page(pfn)); 1039 struct zone *zone = page_zone(pfn_to_page(pfn));
1040 enum zone_type idx = zone_idx(zone); 1040 enum zone_type idx = zone_idx(zone);
1041 int i; 1041 int i;
1042 1042
1043 *zone_shift = 0;
1044
1043 if (idx < target) { 1045 if (idx < target) {
1044 /* pages must be at end of current zone */ 1046 /* pages must be at end of current zone */
1045 if (pfn + nr_pages != zone_end_pfn(zone)) 1047 if (pfn + nr_pages != zone_end_pfn(zone))
1046 return 0; 1048 return false;
1047 1049
1048 /* no zones in use between current zone and target */ 1050 /* no zones in use between current zone and target */
1049 for (i = idx + 1; i < target; i++) 1051 for (i = idx + 1; i < target; i++)
1050 if (zone_is_initialized(zone - idx + i)) 1052 if (zone_is_initialized(zone - idx + i))
1051 return 0; 1053 return false;
1052 } 1054 }
1053 1055
1054 if (target < idx) { 1056 if (target < idx) {
1055 /* pages must be at beginning of current zone */ 1057 /* pages must be at beginning of current zone */
1056 if (pfn != zone->zone_start_pfn) 1058 if (pfn != zone->zone_start_pfn)
1057 return 0; 1059 return false;
1058 1060
1059 /* no zones in use between current zone and target */ 1061 /* no zones in use between current zone and target */
1060 for (i = target + 1; i < idx; i++) 1062 for (i = target + 1; i < idx; i++)
1061 if (zone_is_initialized(zone - idx + i)) 1063 if (zone_is_initialized(zone - idx + i))
1062 return 0; 1064 return false;
1063 } 1065 }
1064 1066
1065 return target - idx; 1067 *zone_shift = target - idx;
1068 return true;
1066} 1069}
1067 1070
1068/* Must be protected by mem_hotplug_begin() */ 1071/* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
1089 !can_online_high_movable(zone)) 1092 !can_online_high_movable(zone))
1090 return -EINVAL; 1093 return -EINVAL;
1091 1094
1092 if (online_type == MMOP_ONLINE_KERNEL) 1095 if (online_type == MMOP_ONLINE_KERNEL) {
1093 zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL); 1096 if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
1094 else if (online_type == MMOP_ONLINE_MOVABLE) 1097 return -EINVAL;
1095 zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE); 1098 } else if (online_type == MMOP_ONLINE_MOVABLE) {
1099 if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
1100 return -EINVAL;
1101 }
1096 1102
1097 zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages); 1103 zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
1098 if (!zone) 1104 if (!zone)
@@ -1477,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1477} 1483}
1478 1484
1479/* 1485/*
1480 * Confirm all pages in a range [start, end) is belongs to the same zone. 1486 * Confirm all pages in a range [start, end) belong to the same zone.
1487 * When true, return its valid [start, end).
1481 */ 1488 */
1482int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) 1489int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1490 unsigned long *valid_start, unsigned long *valid_end)
1483{ 1491{
1484 unsigned long pfn, sec_end_pfn; 1492 unsigned long pfn, sec_end_pfn;
1493 unsigned long start, end;
1485 struct zone *zone = NULL; 1494 struct zone *zone = NULL;
1486 struct page *page; 1495 struct page *page;
1487 int i; 1496 int i;
1488 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); 1497 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
1489 pfn < end_pfn; 1498 pfn < end_pfn;
1490 pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { 1499 pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
1491 /* Make sure the memory section is present first */ 1500 /* Make sure the memory section is present first */
1492 if (!present_section_nr(pfn_to_section_nr(pfn))) 1501 if (!present_section_nr(pfn_to_section_nr(pfn)))
1493 continue; 1502 continue;
@@ -1503,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
1503 page = pfn_to_page(pfn + i); 1512 page = pfn_to_page(pfn + i);
1504 if (zone && page_zone(page) != zone) 1513 if (zone && page_zone(page) != zone)
1505 return 0; 1514 return 0;
1515 if (!zone)
1516 start = pfn + i;
1506 zone = page_zone(page); 1517 zone = page_zone(page);
1518 end = pfn + MAX_ORDER_NR_PAGES;
1507 } 1519 }
1508 } 1520 }
1509 return 1; 1521
1522 if (zone) {
1523 *valid_start = start;
1524 *valid_end = end;
1525 return 1;
1526 } else {
1527 return 0;
1528 }
1510} 1529}
1511 1530
1512/* 1531/*
@@ -1833,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
1833 long offlined_pages; 1852 long offlined_pages;
1834 int ret, drain, retry_max, node; 1853 int ret, drain, retry_max, node;
1835 unsigned long flags; 1854 unsigned long flags;
1855 unsigned long valid_start, valid_end;
1836 struct zone *zone; 1856 struct zone *zone;
1837 struct memory_notify arg; 1857 struct memory_notify arg;
1838 1858
@@ -1843,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
1843 return -EINVAL; 1863 return -EINVAL;
1844 /* This makes hotplug much easier...and readable. 1864 /* This makes hotplug much easier...and readable.
1845 we assume this for now. .*/ 1865 we assume this for now. .*/
1846 if (!test_pages_in_a_zone(start_pfn, end_pfn)) 1866 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
1847 return -EINVAL; 1867 return -EINVAL;
1848 1868
1849 zone = page_zone(pfn_to_page(start_pfn)); 1869 zone = page_zone(pfn_to_page(valid_start));
1850 node = zone_to_nid(zone); 1870 node = zone_to_nid(zone);
1851 nr_pages = end_pfn - start_pfn; 1871 nr_pages = end_pfn - start_pfn;
1852 1872
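Returning the shift directly could not distinguish a valid shift of zero from "cannot shift", which the bool-plus-out-parameter signature fixes; online_pages() now fails with -EINVAL instead of silently shifting by 0. A sketch of the two interface styles under a made-up validity check:

    #include <stdbool.h>
    #include <stdio.h>

    /* Old style: 0 means both "no shift needed" and "not allowed". */
    static int zone_shift_old(int idx, int target, bool pages_at_boundary)
    {
            if (!pages_at_boundary)
                    return 0;           /* ambiguous failure */
            return target - idx;        /* 0 when idx == target */
    }

    /* New style: the return carries validity, the out-param the shift. */
    static bool zone_can_shift(int idx, int target, bool pages_at_boundary,
                               int *zone_shift)
    {
            *zone_shift = 0;
            if (!pages_at_boundary)
                    return false;
            *zone_shift = target - idx;
            return true;
    }

    int main(void)
    {
            int shift;
            bool ok = zone_can_shift(3, 3, true, &shift);

            /* Same zone, valid request: the old API returns 0, which is
             * indistinguishable from its failure case. */
            printf("old=%d\n", zone_shift_old(3, 3, true));
            printf("new=%d shift=%d\n", ok, shift);
            return 0;
    }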
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2e346645eb80..1e7873e40c9a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2017,8 +2017,8 @@ retry_cpuset:
2017 2017
2018 nmask = policy_nodemask(gfp, pol); 2018 nmask = policy_nodemask(gfp, pol);
2019 zl = policy_zonelist(gfp, pol, node); 2019 zl = policy_zonelist(gfp, pol, node);
2020 mpol_cond_put(pol);
2021 page = __alloc_pages_nodemask(gfp, order, zl, nmask); 2020 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2021 mpol_cond_put(pol);
2022out: 2022out:
2023 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2023 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2024 goto retry_cpuset; 2024 goto retry_cpuset;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d604d2596b7b..f3e0c69a97b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3523,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3523 struct page *page = NULL; 3523 struct page *page = NULL;
3524 unsigned int alloc_flags; 3524 unsigned int alloc_flags;
3525 unsigned long did_some_progress; 3525 unsigned long did_some_progress;
3526 enum compact_priority compact_priority = DEF_COMPACT_PRIORITY; 3526 enum compact_priority compact_priority;
3527 enum compact_result compact_result; 3527 enum compact_result compact_result;
3528 int compaction_retries = 0; 3528 int compaction_retries;
3529 int no_progress_loops = 0; 3529 int no_progress_loops;
3530 unsigned long alloc_start = jiffies; 3530 unsigned long alloc_start = jiffies;
3531 unsigned int stall_timeout = 10 * HZ; 3531 unsigned int stall_timeout = 10 * HZ;
3532 unsigned int cpuset_mems_cookie;
3532 3533
3533 /* 3534 /*
3534 * In the slowpath, we sanity check order to avoid ever trying to 3535 * In the slowpath, we sanity check order to avoid ever trying to
@@ -3549,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3549 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 3550 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3550 gfp_mask &= ~__GFP_ATOMIC; 3551 gfp_mask &= ~__GFP_ATOMIC;
3551 3552
3553retry_cpuset:
3554 compaction_retries = 0;
3555 no_progress_loops = 0;
3556 compact_priority = DEF_COMPACT_PRIORITY;
3557 cpuset_mems_cookie = read_mems_allowed_begin();
3558 /*
3559 * We need to recalculate the starting point for the zonelist iterator
 3560 * because we might have used a different nodemask in the fast path, or
3561 * there was a cpuset modification and we are retrying - otherwise we
3562 * could end up iterating over non-eligible zones endlessly.
3563 */
3564 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3565 ac->high_zoneidx, ac->nodemask);
3566 if (!ac->preferred_zoneref->zone)
3567 goto nopage;
3568
3569
3552 /* 3570 /*
3553 * The fast path uses conservative alloc_flags to succeed only until 3571 * The fast path uses conservative alloc_flags to succeed only until
3554 * kswapd needs to be woken up, and to avoid the cost of setting up 3572 * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3708,6 +3726,13 @@ retry:
3708 &compaction_retries)) 3726 &compaction_retries))
3709 goto retry; 3727 goto retry;
3710 3728
3729 /*
3730 * It's possible we raced with cpuset update so the OOM would be
3731 * premature (see below the nopage: label for full explanation).
3732 */
3733 if (read_mems_allowed_retry(cpuset_mems_cookie))
3734 goto retry_cpuset;
3735
3711 /* Reclaim has failed us, start killing things */ 3736 /* Reclaim has failed us, start killing things */
3712 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 3737 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3713 if (page) 3738 if (page)
@@ -3720,6 +3745,16 @@ retry:
3720 } 3745 }
3721 3746
3722nopage: 3747nopage:
3748 /*
3749 * When updating a task's mems_allowed or mempolicy nodemask, it is
3750 * possible to race with parallel threads in such a way that our
3751 * allocation can fail while the mask is being updated. If we are about
3752 * to fail, check if the cpuset changed during allocation and if so,
3753 * retry.
3754 */
3755 if (read_mems_allowed_retry(cpuset_mems_cookie))
3756 goto retry_cpuset;
3757
3723 warn_alloc(gfp_mask, 3758 warn_alloc(gfp_mask,
3724 "page allocation failure: order:%u", order); 3759 "page allocation failure: order:%u", order);
3725got_pg: 3760got_pg:
@@ -3734,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3734 struct zonelist *zonelist, nodemask_t *nodemask) 3769 struct zonelist *zonelist, nodemask_t *nodemask)
3735{ 3770{
3736 struct page *page; 3771 struct page *page;
3737 unsigned int cpuset_mems_cookie;
3738 unsigned int alloc_flags = ALLOC_WMARK_LOW; 3772 unsigned int alloc_flags = ALLOC_WMARK_LOW;
3739 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ 3773 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
3740 struct alloc_context ac = { 3774 struct alloc_context ac = {
@@ -3771,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3771 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) 3805 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
3772 alloc_flags |= ALLOC_CMA; 3806 alloc_flags |= ALLOC_CMA;
3773 3807
3774retry_cpuset:
3775 cpuset_mems_cookie = read_mems_allowed_begin();
3776
3777 /* Dirty zone balancing only done in the fast path */ 3808 /* Dirty zone balancing only done in the fast path */
3778 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); 3809 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3779 3810
@@ -3784,8 +3815,13 @@ retry_cpuset:
3784 */ 3815 */
3785 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, 3816 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
3786 ac.high_zoneidx, ac.nodemask); 3817 ac.high_zoneidx, ac.nodemask);
3787 if (!ac.preferred_zoneref) { 3818 if (!ac.preferred_zoneref->zone) {
3788 page = NULL; 3819 page = NULL;
3820 /*
 3821	 * This might be due to a race with a cpuset_current_mems_allowed
 3822	 * update, so make sure we retry with the original nodemask in the
3823 * slow path.
3824 */
3789 goto no_zone; 3825 goto no_zone;
3790 } 3826 }
3791 3827
@@ -3794,6 +3830,7 @@ retry_cpuset:
3794 if (likely(page)) 3830 if (likely(page))
3795 goto out; 3831 goto out;
3796 3832
3833no_zone:
3797 /* 3834 /*
3798 * Runtime PM, block IO and its error handling path can deadlock 3835 * Runtime PM, block IO and its error handling path can deadlock
3799 * because I/O on the device might not complete. 3836 * because I/O on the device might not complete.
@@ -3805,21 +3842,10 @@ retry_cpuset:
3805 * Restore the original nodemask if it was potentially replaced with 3842 * Restore the original nodemask if it was potentially replaced with
3806 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 3843 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
3807 */ 3844 */
3808 if (cpusets_enabled()) 3845 if (unlikely(ac.nodemask != nodemask))
3809 ac.nodemask = nodemask; 3846 ac.nodemask = nodemask;
3810 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
3811 3847
3812no_zone: 3848 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
3813 /*
3814 * When updating a task's mems_allowed, it is possible to race with
3815 * parallel threads in such a way that an allocation can fail while
3816 * the mask is being updated. If a page allocation is about to fail,
3817 * check if the cpuset changed during allocation and if so, retry.
3818 */
3819 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
3820 alloc_mask = gfp_mask;
3821 goto retry_cpuset;
3822 }
3823 3849
3824out: 3850out:
3825 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && 3851 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -7248,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
7248 .zone = page_zone(pfn_to_page(start)), 7274 .zone = page_zone(pfn_to_page(start)),
7249 .mode = MIGRATE_SYNC, 7275 .mode = MIGRATE_SYNC,
7250 .ignore_skip_hint = true, 7276 .ignore_skip_hint = true,
7277 .gfp_mask = GFP_KERNEL,
7251 }; 7278 };
7252 INIT_LIST_HEAD(&cc.migratepages); 7279 INIT_LIST_HEAD(&cc.migratepages);
7253 7280
diff --git a/mm/shmem.c b/mm/shmem.c
index bb53285a1d99..3a7587a0314d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
415 struct shrink_control *sc, unsigned long nr_to_split) 415 struct shrink_control *sc, unsigned long nr_to_split)
416{ 416{
417 LIST_HEAD(list), *pos, *next; 417 LIST_HEAD(list), *pos, *next;
418 LIST_HEAD(to_remove);
418 struct inode *inode; 419 struct inode *inode;
419 struct shmem_inode_info *info; 420 struct shmem_inode_info *info;
420 struct page *page; 421 struct page *page;
@@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
441 /* Check if there's anything to gain */ 442 /* Check if there's anything to gain */
442 if (round_up(inode->i_size, PAGE_SIZE) == 443 if (round_up(inode->i_size, PAGE_SIZE) ==
443 round_up(inode->i_size, HPAGE_PMD_SIZE)) { 444 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
444 list_del_init(&info->shrinklist); 445 list_move(&info->shrinklist, &to_remove);
445 removed++; 446 removed++;
446 iput(inode);
447 goto next; 447 goto next;
448 } 448 }
449 449
@@ -454,6 +454,13 @@ next:
454 } 454 }
455 spin_unlock(&sbinfo->shrinklist_lock); 455 spin_unlock(&sbinfo->shrinklist_lock);
456 456
457 list_for_each_safe(pos, next, &to_remove) {
458 info = list_entry(pos, struct shmem_inode_info, shrinklist);
459 inode = &info->vfs_inode;
460 list_del_init(&info->shrinklist);
461 iput(inode);
462 }
463
457 list_for_each_safe(pos, next, &list) { 464 list_for_each_safe(pos, next, &list) {
458 int ret; 465 int ret;
459 466
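The shmem hunk is an instance of a common locking pattern: iput() may sleep, so inodes to be dropped are only moved onto a private list while shrinklist_lock is held, and the actual iput() calls run after the lock is released. A generic sketch of that deferral, with a hypothetical item type and release callback:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {				/* hypothetical */
	struct list_head node;
};

static void drain_deferred(struct list_head *src, spinlock_t *lock,
			   void (*release)(struct item *))
{
	LIST_HEAD(to_release);
	struct item *it, *tmp;

	spin_lock(lock);
	/* no sleeping work while the lock is held */
	list_splice_init(src, &to_release);
	spin_unlock(lock);

	list_for_each_entry_safe(it, tmp, &to_release, node)
		release(it);		/* may sleep now */
}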
diff --git a/mm/slub.c b/mm/slub.c
index 067598a00849..7aa6f433f4de 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
496 return 1; 496 return 1;
497} 497}
498 498
499static void print_section(char *text, u8 *addr, unsigned int length) 499static void print_section(char *level, char *text, u8 *addr,
500 unsigned int length)
500{ 501{
501 metadata_access_enable(); 502 metadata_access_enable();
502 print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr, 503 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
503 length, 1); 504 length, 1);
504 metadata_access_disable(); 505 metadata_access_disable();
505} 506}
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
636 p, p - addr, get_freepointer(s, p)); 637 p, p - addr, get_freepointer(s, p));
637 638
638 if (s->flags & SLAB_RED_ZONE) 639 if (s->flags & SLAB_RED_ZONE)
639 print_section("Redzone ", p - s->red_left_pad, s->red_left_pad); 640 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
641 s->red_left_pad);
640 else if (p > addr + 16) 642 else if (p > addr + 16)
641 print_section("Bytes b4 ", p - 16, 16); 643 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
642 644
643 print_section("Object ", p, min_t(unsigned long, s->object_size, 645 print_section(KERN_ERR, "Object ", p,
644 PAGE_SIZE)); 646 min_t(unsigned long, s->object_size, PAGE_SIZE));
645 if (s->flags & SLAB_RED_ZONE) 647 if (s->flags & SLAB_RED_ZONE)
646 print_section("Redzone ", p + s->object_size, 648 print_section(KERN_ERR, "Redzone ", p + s->object_size,
647 s->inuse - s->object_size); 649 s->inuse - s->object_size);
648 650
649 if (s->offset) 651 if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
658 660
659 if (off != size_from_object(s)) 661 if (off != size_from_object(s))
660 /* Beginning of the filler is the free pointer */ 662 /* Beginning of the filler is the free pointer */
661 print_section("Padding ", p + off, size_from_object(s) - off); 663 print_section(KERN_ERR, "Padding ", p + off,
664 size_from_object(s) - off);
662 665
663 dump_stack(); 666 dump_stack();
664} 667}
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
820 end--; 823 end--;
821 824
822 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); 825 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
823 print_section("Padding ", end - remainder, remainder); 826 print_section(KERN_ERR, "Padding ", end - remainder, remainder);
824 827
825 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); 828 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
826 return 0; 829 return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
973 page->freelist); 976 page->freelist);
974 977
975 if (!alloc) 978 if (!alloc)
976 print_section("Object ", (void *)object, 979 print_section(KERN_INFO, "Object ", (void *)object,
977 s->object_size); 980 s->object_size);
978 981
979 dump_stack(); 982 dump_stack();
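Threading a printk level through print_section() lets the allocation trace dump go out at KERN_INFO while corruption reports stay at KERN_ERR. The underlying helper is print_hex_dump(); a minimal standalone use, matching the 16-byte rows and 1-byte grouping above:

#include <linux/printk.h>
#include <linux/types.h>

static void dump_object(const void *obj, size_t len)
{
	/* 16 bytes per row, 1-byte groups, with an ASCII column */
	print_hex_dump(KERN_INFO, "Object ", DUMP_PREFIX_ADDRESS,
		       16, 1, obj, len, true);
}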
diff --git a/mm/zswap.c b/mm/zswap.c
index 067a0d62f318..cabf09e0128b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
78 78
79/* Enable/disable zswap (disabled by default) */ 79/* Enable/disable zswap (disabled by default) */
80static bool zswap_enabled; 80static bool zswap_enabled;
81module_param_named(enabled, zswap_enabled, bool, 0644); 81static int zswap_enabled_param_set(const char *,
82 const struct kernel_param *);
83static struct kernel_param_ops zswap_enabled_param_ops = {
84 .set = zswap_enabled_param_set,
85 .get = param_get_bool,
86};
87module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
82 88
83/* Crypto compressor to use */ 89/* Crypto compressor to use */
84#define ZSWAP_COMPRESSOR_DEFAULT "lzo" 90#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
176/* used by param callback function */ 182/* used by param callback function */
177static bool zswap_init_started; 183static bool zswap_init_started;
178 184
185/* fatal error during init */
186static bool zswap_init_failed;
187
179/********************************* 188/*********************************
180* helpers and fwd declarations 189* helpers and fwd declarations
181**********************************/ 190**********************************/
@@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
624 char *s = strstrip((char *)val); 633 char *s = strstrip((char *)val);
625 int ret; 634 int ret;
626 635
636 if (zswap_init_failed) {
637 pr_err("can't set param, initialization failed\n");
638 return -ENODEV;
639 }
640
627 /* no change required */ 641 /* no change required */
628 if (!strcmp(s, *(char **)kp->arg)) 642 if (!strcmp(s, *(char **)kp->arg))
629 return 0; 643 return 0;
@@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val,
703 return __zswap_param_set(val, kp, NULL, zswap_compressor); 717 return __zswap_param_set(val, kp, NULL, zswap_compressor);
704} 718}
705 719
720static int zswap_enabled_param_set(const char *val,
721 const struct kernel_param *kp)
722{
723 if (zswap_init_failed) {
724 pr_err("can't enable, initialization failed\n");
725 return -ENODEV;
726 }
727
728 return param_set_bool(val, kp);
729}
730
706/********************************* 731/*********************************
707* writeback code 732* writeback code
708**********************************/ 733**********************************/
@@ -1201,6 +1226,9 @@ hp_fail:
1201dstmem_fail: 1226dstmem_fail:
1202 zswap_entry_cache_destroy(); 1227 zswap_entry_cache_destroy();
1203cache_fail: 1228cache_fail:
1229 /* if built-in, we aren't unloaded on failure; don't allow use */
1230 zswap_init_failed = true;
1231 zswap_enabled = false;
1204 return -ENOMEM; 1232 return -ENOMEM;
1205} 1233}
1206/* must be late so crypto has time to come up */ 1234/* must be late so crypto has time to come up */
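Switching from module_param_named() to module_param_cb() gives zswap a store hook that can veto writes once initialization has failed. The general shape of such a guarded boolean parameter, sketched for a hypothetical module; param_set_bool()/param_get_bool() keep the normal bool semantics for the cases the hook allows:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool my_enabled;
static bool my_init_failed;

static int my_enabled_set(const char *val, const struct kernel_param *kp)
{
	if (my_init_failed)
		return -ENODEV;	/* refuse writes after a failed init */
	return param_set_bool(val, kp);
}

static const struct kernel_param_ops my_enabled_ops = {
	.set = my_enabled_set,
	.get = param_get_bool,
};
module_param_cb(enabled, &my_enabled_ops, &my_enabled, 0644);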
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 9c561e683f4b..0854ebd8613e 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -474,7 +474,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
474 primary_if = batadv_primary_if_get_selected(bat_priv); 474 primary_if = batadv_primary_if_get_selected(bat_priv);
475 if (!primary_if) { 475 if (!primary_if) {
476 ret = -EINVAL; 476 ret = -EINVAL;
477 goto put_primary_if; 477 goto free_skb;
478 } 478 }
479 479
480 /* Create one header to be copied to all fragments */ 480 /* Create one header to be copied to all fragments */
@@ -502,7 +502,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
502 skb_fragment = batadv_frag_create(skb, &frag_header, mtu); 502 skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
503 if (!skb_fragment) { 503 if (!skb_fragment) {
504 ret = -ENOMEM; 504 ret = -ENOMEM;
505 goto free_skb; 505 goto put_primary_if;
506 } 506 }
507 507
508 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX); 508 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
@@ -511,7 +511,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
511 ret = batadv_send_unicast_skb(skb_fragment, neigh_node); 511 ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
512 if (ret != NET_XMIT_SUCCESS) { 512 if (ret != NET_XMIT_SUCCESS) {
513 ret = NET_XMIT_DROP; 513 ret = NET_XMIT_DROP;
514 goto free_skb; 514 goto put_primary_if;
515 } 515 }
516 516
517 frag_header.no++; 517 frag_header.no++;
@@ -519,7 +519,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
519 /* The initial check in this function should cover this case */ 519 /* The initial check in this function should cover this case */
520 if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) { 520 if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
521 ret = -EINVAL; 521 ret = -EINVAL;
522 goto free_skb; 522 goto put_primary_if;
523 } 523 }
524 } 524 }
525 525
@@ -527,7 +527,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
527 if (batadv_skb_head_push(skb, header_size) < 0 || 527 if (batadv_skb_head_push(skb, header_size) < 0 ||
528 pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) { 528 pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
529 ret = -ENOMEM; 529 ret = -ENOMEM;
530 goto free_skb; 530 goto put_primary_if;
531 } 531 }
532 532
533 memcpy(skb->data, &frag_header, header_size); 533 memcpy(skb->data, &frag_header, header_size);
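The relabelled error paths above restore the usual unwind ordering: each goto target releases exactly what has been acquired by the time of the jump, in reverse order of acquisition. The canonical shape, with all functions hypothetical:

#include <linux/errno.h>

/* acquire_a/acquire_b/use/release_* are all hypothetical */
static int do_work(void)
{
	void *a, *b;
	int ret;

	a = acquire_a();
	if (!a)
		return -ENOMEM;

	b = acquire_b();
	if (!b) {
		ret = -ENOMEM;
		goto put_a;		/* only 'a' is held here */
	}

	ret = use(a, b);
	release_b(b);
put_a:
	release_a(a);
	return ret;
}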
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 71c7453268c1..7109b389ea58 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -781,20 +781,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
781 return 0; 781 return 0;
782} 782}
783 783
784static int br_dev_newlink(struct net *src_net, struct net_device *dev,
785 struct nlattr *tb[], struct nlattr *data[])
786{
787 struct net_bridge *br = netdev_priv(dev);
788
789 if (tb[IFLA_ADDRESS]) {
790 spin_lock_bh(&br->lock);
791 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
792 spin_unlock_bh(&br->lock);
793 }
794
795 return register_netdevice(dev);
796}
797
798static int br_port_slave_changelink(struct net_device *brdev, 784static int br_port_slave_changelink(struct net_device *brdev,
799 struct net_device *dev, 785 struct net_device *dev,
800 struct nlattr *tb[], 786 struct nlattr *tb[],
@@ -1115,6 +1101,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
1115 return 0; 1101 return 0;
1116} 1102}
1117 1103
1104static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1105 struct nlattr *tb[], struct nlattr *data[])
1106{
1107 struct net_bridge *br = netdev_priv(dev);
1108 int err;
1109
1110 if (tb[IFLA_ADDRESS]) {
1111 spin_lock_bh(&br->lock);
1112 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1113 spin_unlock_bh(&br->lock);
1114 }
1115
1116 err = br_changelink(dev, tb, data);
1117 if (err)
1118 return err;
1119
1120 return register_netdevice(dev);
1121}
1122
1118static size_t br_get_size(const struct net_device *brdev) 1123static size_t br_get_size(const struct net_device *brdev)
1119{ 1124{
1120 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */ 1125 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
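Moving br_dev_newlink() below br_changelink() lets it apply the IFLA_BR_* attributes before the device is registered, so a bridge never becomes visible half-configured. The general newlink shape this follows, with my_changelink() standing in for the real attribute handler:

#include <linux/netdevice.h>
#include <net/rtnetlink.h>

/* my_changelink() is hypothetical */
static int my_newlink(struct net *src_net, struct net_device *dev,
		      struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	/* apply attributes while the device is still invisible */
	err = my_changelink(dev, tb, data);
	if (err)
		return err;

	/* only a fully configured device gets registered */
	return register_netdevice(dev);
}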
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 1108079d934f..5488e4a6ccd0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
445 * @func: callback function on filter match 445 * @func: callback function on filter match
446 * @data: returned parameter for callback function 446 * @data: returned parameter for callback function
447 * @ident: string for calling module identification 447 * @ident: string for calling module identification
448 * @sk: socket pointer (might be NULL)
448 * 449 *
449 * Description: 450 * Description:
450 * Invokes the callback function with the received sk_buff and the given 451 * Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
468 */ 469 */
469int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, 470int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
470 void (*func)(struct sk_buff *, void *), void *data, 471 void (*func)(struct sk_buff *, void *), void *data,
471 char *ident) 472 char *ident, struct sock *sk)
472{ 473{
473 struct receiver *r; 474 struct receiver *r;
474 struct hlist_head *rl; 475 struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
496 r->func = func; 497 r->func = func;
497 r->data = data; 498 r->data = data;
498 r->ident = ident; 499 r->ident = ident;
500 r->sk = sk;
499 501
500 hlist_add_head_rcu(&r->list, rl); 502 hlist_add_head_rcu(&r->list, rl);
501 d->entries++; 503 d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
520static void can_rx_delete_receiver(struct rcu_head *rp) 522static void can_rx_delete_receiver(struct rcu_head *rp)
521{ 523{
522 struct receiver *r = container_of(rp, struct receiver, rcu); 524 struct receiver *r = container_of(rp, struct receiver, rcu);
525 struct sock *sk = r->sk;
523 526
524 kmem_cache_free(rcv_cache, r); 527 kmem_cache_free(rcv_cache, r);
528 if (sk)
529 sock_put(sk);
525} 530}
526 531
527/** 532/**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
596 spin_unlock(&can_rcvlists_lock); 601 spin_unlock(&can_rcvlists_lock);
597 602
598 /* schedule the receiver item for deletion */ 603 /* schedule the receiver item for deletion */
599 if (r) 604 if (r) {
605 if (r->sk)
606 sock_hold(r->sk);
600 call_rcu(&r->rcu, can_rx_delete_receiver); 607 call_rcu(&r->rcu, can_rx_delete_receiver);
608 }
601} 609}
602EXPORT_SYMBOL(can_rx_unregister); 610EXPORT_SYMBOL(can_rx_unregister);
603 611
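The af_can hunks pin the socket for the duration of the RCU grace period: sock_hold() before call_rcu(), sock_put() in the callback once the receiver itself is gone, so the bcm/raw callbacks can never run against a freed socket. A generic sketch of that lifetime rule, using a simplified receiver struct and kfree() in place of the kmem_cache:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <net/sock.h>

struct rcv {				/* simplified receiver */
	struct sock *sk;		/* may be NULL */
	struct rcu_head rcu;
};

static void rcv_free_rcu(struct rcu_head *rp)
{
	struct rcv *r = container_of(rp, struct rcv, rcu);
	struct sock *sk = r->sk;

	kfree(r);
	if (sk)
		sock_put(sk);	/* drop the ref taken at unregister time */
}

static void rcv_unregister(struct rcv *r)
{
	if (r->sk)
		sock_hold(r->sk);	/* keep sk valid until the callback runs */
	call_rcu(&r->rcu, rcv_free_rcu);
}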
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fca0fe9fc45a..b86f5129e838 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -50,13 +50,14 @@
50 50
51struct receiver { 51struct receiver {
52 struct hlist_node list; 52 struct hlist_node list;
53 struct rcu_head rcu;
54 canid_t can_id; 53 canid_t can_id;
55 canid_t mask; 54 canid_t mask;
56 unsigned long matches; 55 unsigned long matches;
57 void (*func)(struct sk_buff *, void *); 56 void (*func)(struct sk_buff *, void *);
58 void *data; 57 void *data;
59 char *ident; 58 char *ident;
59 struct sock *sk;
60 struct rcu_head rcu;
60}; 61};
61 62
62#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) 63#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 21ac75390e3d..95d13b233c65 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
734 734
735static void bcm_remove_op(struct bcm_op *op) 735static void bcm_remove_op(struct bcm_op *op)
736{ 736{
737 hrtimer_cancel(&op->timer); 737 if (op->tsklet.func) {
738 hrtimer_cancel(&op->thrtimer); 738 while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
739 739 test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
740 if (op->tsklet.func) 740 hrtimer_active(&op->timer)) {
741 tasklet_kill(&op->tsklet); 741 hrtimer_cancel(&op->timer);
742 tasklet_kill(&op->tsklet);
743 }
744 }
742 745
743 if (op->thrtsklet.func) 746 if (op->thrtsklet.func) {
744 tasklet_kill(&op->thrtsklet); 747 while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
748 test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
749 hrtimer_active(&op->thrtimer)) {
750 hrtimer_cancel(&op->thrtimer);
751 tasklet_kill(&op->thrtsklet);
752 }
753 }
745 754
746 if ((op->frames) && (op->frames != &op->sframe)) 755 if ((op->frames) && (op->frames != &op->sframe))
747 kfree(op->frames); 756 kfree(op->frames);
@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1216 err = can_rx_register(dev, op->can_id, 1225 err = can_rx_register(dev, op->can_id,
1217 REGMASK(op->can_id), 1226 REGMASK(op->can_id),
1218 bcm_rx_handler, op, 1227 bcm_rx_handler, op,
1219 "bcm"); 1228 "bcm", sk);
1220 1229
1221 op->rx_reg_dev = dev; 1230 op->rx_reg_dev = dev;
1222 dev_put(dev); 1231 dev_put(dev);
@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1225 } else 1234 } else
1226 err = can_rx_register(NULL, op->can_id, 1235 err = can_rx_register(NULL, op->can_id,
1227 REGMASK(op->can_id), 1236 REGMASK(op->can_id),
1228 bcm_rx_handler, op, "bcm"); 1237 bcm_rx_handler, op, "bcm", sk);
1229 if (err) { 1238 if (err) {
1230 /* this bcm rx op is broken -> remove it */ 1239 /* this bcm rx op is broken -> remove it */
1231 list_del(&op->list); 1240 list_del(&op->list);
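bcm_remove_op() loops because the hrtimer callback can schedule the tasklet and the tasklet can re-arm the hrtimer, so cancelling each side once is not sufficient. A generic teardown for such a mutually re-arming pair, under the same assumptions as the code above:

#include <linux/hrtimer.h>
#include <linux/interrupt.h>

static void stop_rearming_pair(struct hrtimer *t, struct tasklet_struct *tl)
{
	/* repeat until neither side can re-arm the other */
	while (test_bit(TASKLET_STATE_SCHED, &tl->state) ||
	       test_bit(TASKLET_STATE_RUN, &tl->state) ||
	       hrtimer_active(t)) {
		hrtimer_cancel(t);
		tasklet_kill(tl);
	}
}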
diff --git a/net/can/gw.c b/net/can/gw.c
index a54ab0c82104..7056a1a2bb70 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
442{ 442{
443 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, 443 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
444 gwj->ccgw.filter.can_mask, can_can_gw_rcv, 444 gwj->ccgw.filter.can_mask, can_can_gw_rcv,
445 gwj, "gw"); 445 gwj, "gw", NULL);
446} 446}
447 447
448static inline void cgw_unregister_filter(struct cgw_job *gwj) 448static inline void cgw_unregister_filter(struct cgw_job *gwj)
diff --git a/net/can/raw.c b/net/can/raw.c
index b075f028d7e2..6dc546a06673 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
190 for (i = 0; i < count; i++) { 190 for (i = 0; i < count; i++) {
191 err = can_rx_register(dev, filter[i].can_id, 191 err = can_rx_register(dev, filter[i].can_id,
192 filter[i].can_mask, 192 filter[i].can_mask,
193 raw_rcv, sk, "raw"); 193 raw_rcv, sk, "raw", sk);
194 if (err) { 194 if (err) {
195 /* clean up successfully registered filters */ 195 /* clean up successfully registered filters */
196 while (--i >= 0) 196 while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
211 211
212 if (err_mask) 212 if (err_mask)
213 err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, 213 err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
214 raw_rcv, sk, "raw"); 214 raw_rcv, sk, "raw", sk);
215 215
216 return err; 216 return err;
217} 217}
diff --git a/net/core/dev.c b/net/core/dev.c
index 07b307b0b414..7f218e095361 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2795,9 +2795,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
2795 if (skb->ip_summed != CHECKSUM_NONE && 2795 if (skb->ip_summed != CHECKSUM_NONE &&
2796 !can_checksum_protocol(features, type)) { 2796 !can_checksum_protocol(features, type)) {
2797 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 2797 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2798 } else if (illegal_highdma(skb->dev, skb)) {
2799 features &= ~NETIF_F_SG;
2800 } 2798 }
2799 if (illegal_highdma(skb->dev, skb))
2800 features &= ~NETIF_F_SG;
2801 2801
2802 return features; 2802 return features;
2803} 2803}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index e23766c7e3ba..236a21e3c878 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1712,7 +1712,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
1712static noinline_for_stack int ethtool_set_channels(struct net_device *dev, 1712static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
1713 void __user *useraddr) 1713 void __user *useraddr)
1714{ 1714{
1715 struct ethtool_channels channels, max; 1715 struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
1716 u32 max_rx_in_use = 0; 1716 u32 max_rx_in_use = 0;
1717 1717
1718 if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) 1718 if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
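The one-line ethtool fix leans on a C guarantee worth spelling out: a designated initializer zeroes every member it does not name, so max starts from a known state before ->get_channels() fills it in, and .cmd is valid even if the driver reads it. Sketch:

#include <linux/ethtool.h>

void example(void)
{
	/* members not named here (max_rx, rx_count, ...) are zeroed */
	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
	(void)ch;
}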
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 71bb3e2eca08..b3eef90b2df9 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
386 .fill_encap = bpf_fill_encap_info, 386 .fill_encap = bpf_fill_encap_info,
387 .get_encap_size = bpf_encap_nlsize, 387 .get_encap_size = bpf_encap_nlsize,
388 .cmp_encap = bpf_encap_cmp, 388 .cmp_encap = bpf_encap_cmp,
389 .owner = THIS_MODULE,
389}; 390};
390 391
391static int __init bpf_lwt_init(void) 392static int __init bpf_lwt_init(void)
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index a5d4e866ce88..c23465005f2f 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -26,6 +26,7 @@
26#include <net/lwtunnel.h> 26#include <net/lwtunnel.h>
27#include <net/rtnetlink.h> 27#include <net/rtnetlink.h>
28#include <net/ip6_fib.h> 28#include <net/ip6_fib.h>
29#include <net/nexthop.h>
29 30
30#ifdef CONFIG_MODULES 31#ifdef CONFIG_MODULES
31 32
@@ -114,25 +115,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
114 ret = -EOPNOTSUPP; 115 ret = -EOPNOTSUPP;
115 rcu_read_lock(); 116 rcu_read_lock();
116 ops = rcu_dereference(lwtun_encaps[encap_type]); 117 ops = rcu_dereference(lwtun_encaps[encap_type]);
118 if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
119 ret = ops->build_state(dev, encap, family, cfg, lws);
120 if (ret)
121 module_put(ops->owner);
122 }
123 rcu_read_unlock();
124
125 return ret;
126}
127EXPORT_SYMBOL(lwtunnel_build_state);
128
129int lwtunnel_valid_encap_type(u16 encap_type)
130{
131 const struct lwtunnel_encap_ops *ops;
132 int ret = -EINVAL;
133
134 if (encap_type == LWTUNNEL_ENCAP_NONE ||
135 encap_type > LWTUNNEL_ENCAP_MAX)
136 return ret;
137
138 rcu_read_lock();
139 ops = rcu_dereference(lwtun_encaps[encap_type]);
140 rcu_read_unlock();
117#ifdef CONFIG_MODULES 141#ifdef CONFIG_MODULES
118 if (!ops) { 142 if (!ops) {
119 const char *encap_type_str = lwtunnel_encap_str(encap_type); 143 const char *encap_type_str = lwtunnel_encap_str(encap_type);
120 144
121 if (encap_type_str) { 145 if (encap_type_str) {
122 rcu_read_unlock(); 146 __rtnl_unlock();
123 request_module("rtnl-lwt-%s", encap_type_str); 147 request_module("rtnl-lwt-%s", encap_type_str);
148 rtnl_lock();
149
124 rcu_read_lock(); 150 rcu_read_lock();
125 ops = rcu_dereference(lwtun_encaps[encap_type]); 151 ops = rcu_dereference(lwtun_encaps[encap_type]);
152 rcu_read_unlock();
126 } 153 }
127 } 154 }
128#endif 155#endif
129 if (likely(ops && ops->build_state)) 156 return ops ? 0 : -EOPNOTSUPP;
130 ret = ops->build_state(dev, encap, family, cfg, lws); 157}
131 rcu_read_unlock(); 158EXPORT_SYMBOL(lwtunnel_valid_encap_type);
132 159
133 return ret; 160int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
161{
162 struct rtnexthop *rtnh = (struct rtnexthop *)attr;
163 struct nlattr *nla_entype;
164 struct nlattr *attrs;
165 struct nlattr *nla;
166 u16 encap_type;
167 int attrlen;
168
169 while (rtnh_ok(rtnh, remaining)) {
170 attrlen = rtnh_attrlen(rtnh);
171 if (attrlen > 0) {
172 attrs = rtnh_attrs(rtnh);
173 nla = nla_find(attrs, attrlen, RTA_ENCAP);
174 nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
175
176 if (nla_entype) {
177 encap_type = nla_get_u16(nla_entype);
178
179 if (lwtunnel_valid_encap_type(encap_type) != 0)
180 return -EOPNOTSUPP;
181 }
182 }
183 rtnh = rtnh_next(rtnh, &remaining);
184 }
185
186 return 0;
134} 187}
135EXPORT_SYMBOL(lwtunnel_build_state); 188EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
136 189
137void lwtstate_free(struct lwtunnel_state *lws) 190void lwtstate_free(struct lwtunnel_state *lws)
138{ 191{
@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws)
144 } else { 197 } else {
145 kfree(lws); 198 kfree(lws);
146 } 199 }
200 module_put(ops->owner);
147} 201}
148EXPORT_SYMBOL(lwtstate_free); 202EXPORT_SYMBOL(lwtstate_free);
149 203
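The new ->owner field plus try_module_get()/module_put() closes a module lifetime hole: an encap state could otherwise outlive the module whose ops it points at. The same pattern recurs in the .owner = THIS_MODULE hunks for lwt_bpf, ip_tunnel_core, ila, and seg6 below. Sketched against a hypothetical ops structure:

#include <linux/errno.h>
#include <linux/module.h>

struct my_ops {				/* hypothetical ops table */
	struct module *owner;
	int (*build)(void **state);
};

static int my_build_state(const struct my_ops *ops, void **state)
{
	int ret;

	if (!try_module_get(ops->owner))
		return -EOPNOTSUPP;
	ret = ops->build(state);
	if (ret)
		module_put(ops->owner);	/* no state created, drop the ref */
	return ret;
}

static void my_free_state(const struct my_ops *ops, void *state)
{
	/* ... free 'state' first ... */
	module_put(ops->owner);		/* pairs with the get in build */
}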
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index adfc790f7193..c4e879c02186 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
227 opt = ireq->ipv6_opt; 227 opt = ireq->ipv6_opt;
228 if (!opt) 228 if (!opt)
229 opt = rcu_dereference(np->opt); 229 opt = rcu_dereference(np->opt);
230 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 230 err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
231 rcu_read_unlock(); 231 rcu_read_unlock();
232 err = net_xmit_eval(err); 232 err = net_xmit_eval(err);
233 } 233 }
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
281 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); 281 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
282 if (!IS_ERR(dst)) { 282 if (!IS_ERR(dst)) {
283 skb_dst_set(skb, dst); 283 skb_dst_set(skb, dst);
284 ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); 284 ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
285 DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 285 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
286 DCCP_INC_STATS(DCCP_MIB_OUTRSTS); 286 DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
287 return; 287 return;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 68c9eea00518..7d4596110851 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1105,10 +1105,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
1105 /* Use already configured phy mode */ 1105 /* Use already configured phy mode */
1106 if (p->phy_interface == PHY_INTERFACE_MODE_NA) 1106 if (p->phy_interface == PHY_INTERFACE_MODE_NA)
1107 p->phy_interface = p->phy->interface; 1107 p->phy_interface = p->phy->interface;
1108 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, 1108 return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
1109 p->phy_interface); 1109 p->phy_interface);
1110
1111 return 0;
1112} 1110}
1113 1111
1114static int dsa_slave_phy_setup(struct dsa_slave_priv *p, 1112static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
@@ -1203,6 +1201,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
1203{ 1201{
1204 struct dsa_slave_priv *p = netdev_priv(slave_dev); 1202 struct dsa_slave_priv *p = netdev_priv(slave_dev);
1205 1203
1204 netif_device_detach(slave_dev);
1205
1206 if (p->phy) { 1206 if (p->phy) {
1207 phy_stop(p->phy); 1207 phy_stop(p->phy);
1208 p->old_pause = -1; 1208 p->old_pause = -1;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index eae0332b0e8c..7db2ad2e82d3 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -46,6 +46,7 @@
46#include <net/rtnetlink.h> 46#include <net/rtnetlink.h>
47#include <net/xfrm.h> 47#include <net/xfrm.h>
48#include <net/l3mdev.h> 48#include <net/l3mdev.h>
49#include <net/lwtunnel.h>
49#include <trace/events/fib.h> 50#include <trace/events/fib.h>
50 51
51#ifndef CONFIG_IP_MULTIPLE_TABLES 52#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -677,6 +678,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
677 cfg->fc_mx_len = nla_len(attr); 678 cfg->fc_mx_len = nla_len(attr);
678 break; 679 break;
679 case RTA_MULTIPATH: 680 case RTA_MULTIPATH:
681 err = lwtunnel_valid_encap_type_attr(nla_data(attr),
682 nla_len(attr));
683 if (err < 0)
684 goto errout;
680 cfg->fc_mp = nla_data(attr); 685 cfg->fc_mp = nla_data(attr);
681 cfg->fc_mp_len = nla_len(attr); 686 cfg->fc_mp_len = nla_len(attr);
682 break; 687 break;
@@ -691,6 +696,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
691 break; 696 break;
692 case RTA_ENCAP_TYPE: 697 case RTA_ENCAP_TYPE:
693 cfg->fc_encap_type = nla_get_u16(attr); 698 cfg->fc_encap_type = nla_get_u16(attr);
699 err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
700 if (err < 0)
701 goto errout;
694 break; 702 break;
695 } 703 }
696 } 704 }
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index fac275c48108..b67719f45953 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1629,6 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1629 sk->sk_protocol = ip_hdr(skb)->protocol; 1629 sk->sk_protocol = ip_hdr(skb)->protocol;
1630 sk->sk_bound_dev_if = arg->bound_dev_if; 1630 sk->sk_bound_dev_if = arg->bound_dev_if;
1631 sk->sk_sndbuf = sysctl_wmem_default; 1631 sk->sk_sndbuf = sysctl_wmem_default;
1632 sk->sk_mark = fl4.flowi4_mark;
1632 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, 1633 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1633 len, 0, &ipc, &rt, MSG_DONTWAIT); 1634 len, 0, &ipc, &rt, MSG_DONTWAIT);
1634 if (unlikely(err)) { 1635 if (unlikely(err)) {
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index fed3d29f9eb3..0fd1976ab63b 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -313,6 +313,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
313 .fill_encap = ip_tun_fill_encap_info, 313 .fill_encap = ip_tun_fill_encap_info,
314 .get_encap_size = ip_tun_encap_nlsize, 314 .get_encap_size = ip_tun_encap_nlsize,
315 .cmp_encap = ip_tun_cmp_encap, 315 .cmp_encap = ip_tun_cmp_encap,
316 .owner = THIS_MODULE,
316}; 317};
317 318
318static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = { 319static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
@@ -403,6 +404,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
403 .fill_encap = ip6_tun_fill_encap_info, 404 .fill_encap = ip6_tun_fill_encap_info,
404 .get_encap_size = ip6_tun_encap_nlsize, 405 .get_encap_size = ip6_tun_encap_nlsize,
405 .cmp_encap = ip_tun_cmp_encap, 406 .cmp_encap = ip_tun_cmp_encap,
407 .owner = THIS_MODULE,
406}; 408};
407 409
408void __init ip_tunnel_core_init(void) 410void __init ip_tunnel_core_init(void)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index a6b8c1a4102b..0a783cd73faf 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -144,7 +144,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
144 rcu_read_lock_bh(); 144 rcu_read_lock_bh();
145 c = __clusterip_config_find(net, clusterip); 145 c = __clusterip_config_find(net, clusterip);
146 if (c) { 146 if (c) {
147 if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount))) 147#ifdef CONFIG_PROC_FS
148 if (!c->pde)
149 c = NULL;
150 else
151#endif
152 if (unlikely(!atomic_inc_not_zero(&c->refcount)))
148 c = NULL; 153 c = NULL;
149 else if (entry) 154 else if (entry)
150 atomic_inc(&c->entries); 155 atomic_inc(&c->entries);
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index f273098e48fd..37fb9552e858 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -63,10 +63,10 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
63 return dev_match || flags & XT_RPFILTER_LOOSE; 63 return dev_match || flags & XT_RPFILTER_LOOSE;
64} 64}
65 65
66static bool rpfilter_is_local(const struct sk_buff *skb) 66static bool
67rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
67{ 68{
68 const struct rtable *rt = skb_rtable(skb); 69 return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
69 return rt && (rt->rt_flags & RTCF_LOCAL);
70} 70}
71 71
72static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) 72static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -79,7 +79,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
79 info = par->matchinfo; 79 info = par->matchinfo;
80 invert = info->flags & XT_RPFILTER_INVERT; 80 invert = info->flags & XT_RPFILTER_INVERT;
81 81
82 if (rpfilter_is_local(skb)) 82 if (rpfilter_is_loopback(skb, xt_in(par)))
83 return true ^ invert; 83 return true ^ invert;
84 84
85 iph = ip_hdr(skb); 85 iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index fd8220213afc..146d86105183 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
126 /* ip_route_me_harder expects skb->dst to be set */ 126 /* ip_route_me_harder expects skb->dst to be set */
127 skb_dst_set_noref(nskb, skb_dst(oldskb)); 127 skb_dst_set_noref(nskb, skb_dst(oldskb));
128 128
129 nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
130
129 skb_reserve(nskb, LL_MAX_HEADER); 131 skb_reserve(nskb, LL_MAX_HEADER);
130 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, 132 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
131 ip4_dst_hoplimit(skb_dst(nskb))); 133 ip4_dst_hoplimit(skb_dst(nskb)));
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 965b1a161369..2981291910dd 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -26,13 +26,6 @@ static __be32 get_saddr(__be32 addr)
26 return addr; 26 return addr;
27} 27}
28 28
29static bool fib4_is_local(const struct sk_buff *skb)
30{
31 const struct rtable *rt = skb_rtable(skb);
32
33 return rt && (rt->rt_flags & RTCF_LOCAL);
34}
35
36#define DSCP_BITS 0xfc 29#define DSCP_BITS 0xfc
37 30
38void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs, 31void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
@@ -95,8 +88,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
95 else 88 else
96 oif = NULL; 89 oif = NULL;
97 90
98 if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) { 91 if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
99 nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX); 92 nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
93 nft_fib_store_result(dest, priv->result, pkt,
94 nft_in(pkt)->ifindex);
100 return; 95 return;
101 } 96 }
102 97
@@ -131,7 +126,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
131 switch (res.type) { 126 switch (res.type) {
132 case RTN_UNICAST: 127 case RTN_UNICAST:
133 break; 128 break;
134 case RTN_LOCAL: /* should not appear here, see fib4_is_local() above */ 129 case RTN_LOCAL: /* Should not see RTN_LOCAL here */
135 return; 130 return;
136 default: 131 default:
137 break; 132 break;
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index f51919535ca7..dd2560c83a85 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
205 * scaled. So correct it appropriately. 205 * scaled. So correct it appropriately.
206 */ 206 */
207 tp->snd_wnd = ntohs(tcp_hdr(skb)->window); 207 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
208 tp->max_window = tp->snd_wnd;
208 209
209 /* Activate the retrans timer so that SYNACK can be retransmitted. 210 /* Activate the retrans timer so that SYNACK can be retransmitted.
210 * The request socket is not added to the ehash 211 * The request socket is not added to the ehash
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6c790754ae3e..41dcbd568cbe 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5078,7 +5078,7 @@ static void tcp_check_space(struct sock *sk)
5078 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { 5078 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
5079 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); 5079 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
5080 /* pairs with tcp_poll() */ 5080 /* pairs with tcp_poll() */
5081 smp_mb__after_atomic(); 5081 smp_mb();
5082 if (sk->sk_socket && 5082 if (sk->sk_socket &&
5083 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 5083 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
5084 tcp_new_space(sk); 5084 tcp_new_space(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1d5331a1b1dc..8ce50dc3ab8c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2518,9 +2518,11 @@ u32 __tcp_select_window(struct sock *sk)
2518 int full_space = min_t(int, tp->window_clamp, allowed_space); 2518 int full_space = min_t(int, tp->window_clamp, allowed_space);
2519 int window; 2519 int window;
2520 2520
2521 if (mss > full_space) 2521 if (unlikely(mss > full_space)) {
2522 mss = full_space; 2522 mss = full_space;
2523 2523 if (mss <= 0)
2524 return 0;
2525 }
2524 if (free_space < (full_space >> 1)) { 2526 if (free_space < (full_space >> 1)) {
2525 icsk->icsk_ack.quick = 0; 2527 icsk->icsk_ack.quick = 0;
2526 2528
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c1e124bc8e1e..f60e88e56255 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5540,8 +5540,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
5540 struct net_device *dev; 5540 struct net_device *dev;
5541 struct inet6_dev *idev; 5541 struct inet6_dev *idev;
5542 5542
5543 rcu_read_lock(); 5543 for_each_netdev(net, dev) {
5544 for_each_netdev_rcu(net, dev) {
5545 idev = __in6_dev_get(dev); 5544 idev = __in6_dev_get(dev);
5546 if (idev) { 5545 if (idev) {
5547 int changed = (!idev->cnf.disable_ipv6) ^ (!newf); 5546 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -5550,7 +5549,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
5550 dev_disable_change(idev); 5549 dev_disable_change(idev);
5551 } 5550 }
5552 } 5551 }
5553 rcu_read_unlock();
5554} 5552}
5555 5553
5556static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) 5554static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index a7bc54ab46e2..13b5e85fe0d5 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -238,6 +238,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
238 .fill_encap = ila_fill_encap_info, 238 .fill_encap = ila_fill_encap_info,
239 .get_encap_size = ila_encap_nlsize, 239 .get_encap_size = ila_encap_nlsize,
240 .cmp_encap = ila_encap_cmp, 240 .cmp_encap = ila_encap_cmp,
241 .owner = THIS_MODULE,
241}; 242};
242 243
243int ila_lwt_init(void) 244int ila_lwt_init(void)
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 7396e75e161b..75c308239243 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -176,7 +176,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
176 /* Restore final destination back after routing done */ 176 /* Restore final destination back after routing done */
177 fl6.daddr = sk->sk_v6_daddr; 177 fl6.daddr = sk->sk_v6_daddr;
178 178
179 res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), 179 res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
180 np->tclass); 180 np->tclass);
181 rcu_read_unlock(); 181 rcu_read_unlock();
182 return res; 182 return res;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 75b6108234dd..558631860d91 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -582,6 +582,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
582 return -1; 582 return -1;
583 583
584 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 584 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
585 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
586 ipv6h = ipv6_hdr(skb);
587
585 if (offset > 0) { 588 if (offset > 0) {
586 struct ipv6_tlv_tnl_enc_lim *tel; 589 struct ipv6_tlv_tnl_enc_lim *tel;
587 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 590 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 38122d04fadc..b6a94ff0bbd0 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
172 * which are using proper atomic operations or spinlocks. 172 * which are using proper atomic operations or spinlocks.
173 */ 173 */
174int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, 174int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
175 struct ipv6_txoptions *opt, int tclass) 175 __u32 mark, struct ipv6_txoptions *opt, int tclass)
176{ 176{
177 struct net *net = sock_net(sk); 177 struct net *net = sock_net(sk);
178 const struct ipv6_pinfo *np = inet6_sk(sk); 178 const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
240 240
241 skb->protocol = htons(ETH_P_IPV6); 241 skb->protocol = htons(ETH_P_IPV6);
242 skb->priority = sk->sk_priority; 242 skb->priority = sk->sk_priority;
243 skb->mark = sk->sk_mark; 243 skb->mark = mark;
244 244
245 mtu = dst_mtu(dst); 245 mtu = dst_mtu(dst);
246 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { 246 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
@@ -1344,7 +1344,7 @@ emsgsize:
1344 */ 1344 */
1345 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP && 1345 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1346 headersize == sizeof(struct ipv6hdr) && 1346 headersize == sizeof(struct ipv6hdr) &&
1347 length < mtu - headersize && 1347 length <= mtu - headersize &&
1348 !(flags & MSG_MORE) && 1348 !(flags & MSG_MORE) &&
1349 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) 1349 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1350 csummode = CHECKSUM_PARTIAL; 1350 csummode = CHECKSUM_PARTIAL;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 753d6d0860fb..75fac933c209 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
400 400
401__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) 401__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
402{ 402{
403 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; 403 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
404 __u8 nexthdr = ipv6h->nexthdr; 404 unsigned int nhoff = raw - skb->data;
405 __u16 off = sizeof(*ipv6h); 405 unsigned int off = nhoff + sizeof(*ipv6h);
406 u8 next, nexthdr = ipv6h->nexthdr;
406 407
407 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { 408 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
408 __u16 optlen = 0;
409 struct ipv6_opt_hdr *hdr; 409 struct ipv6_opt_hdr *hdr;
410 if (raw + off + sizeof(*hdr) > skb->data && 410 u16 optlen;
411 !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) 411
412 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
412 break; 413 break;
413 414
414 hdr = (struct ipv6_opt_hdr *) (raw + off); 415 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
415 if (nexthdr == NEXTHDR_FRAGMENT) { 416 if (nexthdr == NEXTHDR_FRAGMENT) {
416 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; 417 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
417 if (frag_hdr->frag_off) 418 if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
422 } else { 423 } else {
423 optlen = ipv6_optlen(hdr); 424 optlen = ipv6_optlen(hdr);
424 } 425 }
426 /* cache hdr->nexthdr, since pskb_may_pull() might
427 * invalidate hdr
428 */
429 next = hdr->nexthdr;
425 if (nexthdr == NEXTHDR_DEST) { 430 if (nexthdr == NEXTHDR_DEST) {
426 __u16 i = off + 2; 431 u16 i = 2;
432
 433		/* Remember: hdr is no longer valid at this point. */
434 if (!pskb_may_pull(skb, off + optlen))
435 break;
436
427 while (1) { 437 while (1) {
428 struct ipv6_tlv_tnl_enc_lim *tel; 438 struct ipv6_tlv_tnl_enc_lim *tel;
429 439
430 /* No more room for encapsulation limit */ 440 /* No more room for encapsulation limit */
431 if (i + sizeof (*tel) > off + optlen) 441 if (i + sizeof(*tel) > optlen)
432 break; 442 break;
433 443
434 tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; 444 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
435 /* return index of option if found and valid */ 445 /* return index of option if found and valid */
436 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && 446 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
437 tel->length == 1) 447 tel->length == 1)
438 return i; 448 return i + off - nhoff;
439 /* else jump to next option */ 449 /* else jump to next option */
440 if (tel->type) 450 if (tel->type)
441 i += tel->length + 2; 451 i += tel->length + 2;
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
443 i++; 453 i++;
444 } 454 }
445 } 455 }
446 nexthdr = hdr->nexthdr; 456 nexthdr = next;
447 off += optlen; 457 off += optlen;
448 } 458 }
449 return 0; 459 return 0;
@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1303 fl6.flowlabel = key->label; 1313 fl6.flowlabel = key->label;
1304 } else { 1314 } else {
1305 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 1315 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1316 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1317 ipv6h = ipv6_hdr(skb);
1306 if (offset > 0) { 1318 if (offset > 0) {
1307 struct ipv6_tlv_tnl_enc_lim *tel; 1319 struct ipv6_tlv_tnl_enc_lim *tel;
1308 1320
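The rewritten parser works purely in offsets from skb->data because pskb_may_pull() may reallocate skb->head, invalidating every cached header pointer; the ipv6h reloads added in ip6_gre.c and ip6_tunnel.c exist for the same reason. The safe idiom, as a minimal sketch:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int read_byte_at(struct sk_buff *skb, unsigned int off, u8 *out)
{
	/* make sure [0, off] is linear; this can move skb->head */
	if (!pskb_may_pull(skb, off + 1))
		return -EINVAL;
	/* compute the pointer only after the pull, never before */
	*out = skb->data[off];
	return 0;
}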
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index d5263dc364a9..b12e61b7b16c 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -72,10 +72,10 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
72 return ret; 72 return ret;
73} 73}
74 74
75static bool rpfilter_is_local(const struct sk_buff *skb) 75static bool
76rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
76{ 77{
77 const struct rt6_info *rt = (const void *) skb_dst(skb); 78 return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
78 return rt && (rt->rt6i_flags & RTF_LOCAL);
79} 79}
80 80
81static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) 81static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
85 struct ipv6hdr *iph; 85 struct ipv6hdr *iph;
86 bool invert = info->flags & XT_RPFILTER_INVERT; 86 bool invert = info->flags & XT_RPFILTER_INVERT;
87 87
88 if (rpfilter_is_local(skb)) 88 if (rpfilter_is_loopback(skb, xt_in(par)))
89 return true ^ invert; 89 return true ^ invert;
90 90
91 iph = ipv6_hdr(skb); 91 iph = ipv6_hdr(skb);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 10090400c72f..eedee5d108d9 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
157 fl6.fl6_sport = otcph->dest; 157 fl6.fl6_sport = otcph->dest;
158 fl6.fl6_dport = otcph->source; 158 fl6.fl6_dport = otcph->source;
159 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); 159 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
160 fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
160 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 161 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
161 dst = ip6_route_output(net, NULL, &fl6); 162 dst = ip6_route_output(net, NULL, &fl6);
162 if (dst->error) { 163 if (dst->error) {
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
180 181
181 skb_dst_set(nskb, dst); 182 skb_dst_set(nskb, dst);
182 183
184 nskb->mark = fl6.flowi6_mark;
185
183 skb_reserve(nskb, hh_len + dst->header_len); 186 skb_reserve(nskb, hh_len + dst->header_len);
184 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, 187 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
185 ip6_dst_hoplimit(dst)); 188 ip6_dst_hoplimit(dst));
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index c947aad8bcc6..765facf03d45 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -18,13 +18,6 @@
18#include <net/ip6_fib.h> 18#include <net/ip6_fib.h>
19#include <net/ip6_route.h> 19#include <net/ip6_route.h>
20 20
21static bool fib6_is_local(const struct sk_buff *skb)
22{
23 const struct rt6_info *rt = (const void *)skb_dst(skb);
24
25 return rt && (rt->rt6i_flags & RTF_LOCAL);
26}
27
28static int get_ifindex(const struct net_device *dev) 21static int get_ifindex(const struct net_device *dev)
29{ 22{
30 return dev ? dev->ifindex : 0; 23 return dev ? dev->ifindex : 0;
@@ -164,8 +157,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
164 157
165 lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif); 158 lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif);
166 159
167 if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) { 160 if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
168 nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX); 161 nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
162 nft_fib_store_result(dest, priv->result, pkt,
163 nft_in(pkt)->ifindex);
169 return; 164 return;
170 } 165 }
171 166
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4f6b067c8753..7ea85370c11c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2896,6 +2896,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2896 if (tb[RTA_MULTIPATH]) { 2896 if (tb[RTA_MULTIPATH]) {
2897 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); 2897 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2898 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); 2898 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2899
2900 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
2901 cfg->fc_mp_len);
2902 if (err < 0)
2903 goto errout;
2899 } 2904 }
2900 2905
2901 if (tb[RTA_PREF]) { 2906 if (tb[RTA_PREF]) {
@@ -2909,9 +2914,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2909 if (tb[RTA_ENCAP]) 2914 if (tb[RTA_ENCAP])
2910 cfg->fc_encap = tb[RTA_ENCAP]; 2915 cfg->fc_encap = tb[RTA_ENCAP];
2911 2916
2912 if (tb[RTA_ENCAP_TYPE]) 2917 if (tb[RTA_ENCAP_TYPE]) {
2913 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); 2918 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
2914 2919
2920 err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
2921 if (err < 0)
2922 goto errout;
2923 }
2924
2915 if (tb[RTA_EXPIRES]) { 2925 if (tb[RTA_EXPIRES]) {
2916 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ); 2926 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
2917 2927
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index b172d85c650a..a855eb325b03 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -176,6 +176,8 @@ static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info)
176 176
177 val = nla_data(info->attrs[SEG6_ATTR_DST]); 177 val = nla_data(info->attrs[SEG6_ATTR_DST]);
178 t_new = kmemdup(val, sizeof(*val), GFP_KERNEL); 178 t_new = kmemdup(val, sizeof(*val), GFP_KERNEL);
179 if (!t_new)
180 return -ENOMEM;
179 181
180 mutex_lock(&sdata->lock); 182 mutex_lock(&sdata->lock);
181 183
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 1d60cb132835..c46f8cbf5ab5 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -422,6 +422,7 @@ static const struct lwtunnel_encap_ops seg6_iptun_ops = {
422 .fill_encap = seg6_fill_encap_info, 422 .fill_encap = seg6_fill_encap_info,
423 .get_encap_size = seg6_encap_nlsize, 423 .get_encap_size = seg6_encap_nlsize,
424 .cmp_encap = seg6_encap_cmp, 424 .cmp_encap = seg6_encap_cmp,
425 .owner = THIS_MODULE,
425}; 426};
426 427
427int __init seg6_iptunnel_init(void) 428int __init seg6_iptunnel_init(void)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 73bc8fc68acd..cb8929681dc7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -469,7 +469,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
469 opt = ireq->ipv6_opt; 469 opt = ireq->ipv6_opt;
470 if (!opt) 470 if (!opt)
471 opt = rcu_dereference(np->opt); 471 opt = rcu_dereference(np->opt);
472 err = ip6_xmit(sk, skb, fl6, opt, np->tclass); 472 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
473 rcu_read_unlock(); 473 rcu_read_unlock();
474 err = net_xmit_eval(err); 474 err = net_xmit_eval(err);
475 } 475 }
@@ -840,7 +840,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
840 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); 840 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
841 if (!IS_ERR(dst)) { 841 if (!IS_ERR(dst)) {
842 skb_dst_set(buff, dst); 842 skb_dst_set(buff, dst);
843 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); 843 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
844 TCP_INC_STATS(net, TCP_MIB_OUTSEGS); 844 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
845 if (rst) 845 if (rst)
846 TCP_INC_STATS(net, TCP_MIB_OUTRSTS); 846 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 9e2641d45587..206698bc93f4 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -40,8 +40,6 @@ void rate_control_rate_init(struct sta_info *sta)
40 40
41 ieee80211_sta_set_rx_nss(sta); 41 ieee80211_sta_set_rx_nss(sta);
42 42
43 ieee80211_recalc_min_chandef(sta->sdata);
44
45 if (!ref) 43 if (!ref)
46 return; 44 return;
47 45
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 15fe97644ffe..5b77377e5a15 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -98,18 +98,19 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
98} 98}
99EXPORT_SYMBOL_GPL(mpls_pkt_too_big); 99EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
100 100
101static u32 mpls_multipath_hash(struct mpls_route *rt, 101static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
102 struct sk_buff *skb, bool bos)
103{ 102{
104 struct mpls_entry_decoded dec; 103 struct mpls_entry_decoded dec;
104 unsigned int mpls_hdr_len = 0;
105 struct mpls_shim_hdr *hdr; 105 struct mpls_shim_hdr *hdr;
106 bool eli_seen = false; 106 bool eli_seen = false;
107 int label_index; 107 int label_index;
108 u32 hash = 0; 108 u32 hash = 0;
109 109
110 for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos; 110 for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
111 label_index++) { 111 label_index++) {
112 if (!pskb_may_pull(skb, sizeof(*hdr) * label_index)) 112 mpls_hdr_len += sizeof(*hdr);
113 if (!pskb_may_pull(skb, mpls_hdr_len))
113 break; 114 break;
114 115
115 /* Read and decode the current label */ 116 /* Read and decode the current label */
@@ -134,37 +135,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
134 eli_seen = true; 135 eli_seen = true;
135 } 136 }
136 137
137 bos = dec.bos; 138 if (!dec.bos)
138 if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index + 139 continue;
139 sizeof(struct iphdr))) { 140
141 /* found bottom label; does skb have room for a header? */
142 if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
140 const struct iphdr *v4hdr; 143 const struct iphdr *v4hdr;
141 144
142 v4hdr = (const struct iphdr *)(mpls_hdr(skb) + 145 v4hdr = (const struct iphdr *)(hdr + 1);
143 label_index);
144 if (v4hdr->version == 4) { 146 if (v4hdr->version == 4) {
145 hash = jhash_3words(ntohl(v4hdr->saddr), 147 hash = jhash_3words(ntohl(v4hdr->saddr),
146 ntohl(v4hdr->daddr), 148 ntohl(v4hdr->daddr),
147 v4hdr->protocol, hash); 149 v4hdr->protocol, hash);
148 } else if (v4hdr->version == 6 && 150 } else if (v4hdr->version == 6 &&
149 pskb_may_pull(skb, sizeof(*hdr) * label_index + 151 pskb_may_pull(skb, mpls_hdr_len +
150 sizeof(struct ipv6hdr))) { 152 sizeof(struct ipv6hdr))) {
151 const struct ipv6hdr *v6hdr; 153 const struct ipv6hdr *v6hdr;
152 154
153 v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) + 155 v6hdr = (const struct ipv6hdr *)(hdr + 1);
154 label_index);
155
156 hash = __ipv6_addr_jhash(&v6hdr->saddr, hash); 156 hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
157 hash = __ipv6_addr_jhash(&v6hdr->daddr, hash); 157 hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
158 hash = jhash_1word(v6hdr->nexthdr, hash); 158 hash = jhash_1word(v6hdr->nexthdr, hash);
159 } 159 }
160 } 160 }
161
162 break;
161 } 163 }
162 164
163 return hash; 165 return hash;
164} 166}
165 167
166static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, 168static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
167 struct sk_buff *skb, bool bos) 169 struct sk_buff *skb)
168{ 170{
169 int alive = ACCESS_ONCE(rt->rt_nhn_alive); 171 int alive = ACCESS_ONCE(rt->rt_nhn_alive);
170 u32 hash = 0; 172 u32 hash = 0;
@@ -180,7 +182,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
180 if (alive <= 0) 182 if (alive <= 0)
181 return NULL; 183 return NULL;
182 184
183 hash = mpls_multipath_hash(rt, skb, bos); 185 hash = mpls_multipath_hash(rt, skb);
184 nh_index = hash % alive; 186 nh_index = hash % alive;
185 if (alive == rt->rt_nhn) 187 if (alive == rt->rt_nhn)
186 goto out; 188 goto out;
@@ -278,17 +280,11 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
278 hdr = mpls_hdr(skb); 280 hdr = mpls_hdr(skb);
279 dec = mpls_entry_decode(hdr); 281 dec = mpls_entry_decode(hdr);
280 282
281 /* Pop the label */
282 skb_pull(skb, sizeof(*hdr));
283 skb_reset_network_header(skb);
284
285 skb_orphan(skb);
286
287 rt = mpls_route_input_rcu(net, dec.label); 283 rt = mpls_route_input_rcu(net, dec.label);
288 if (!rt) 284 if (!rt)
289 goto drop; 285 goto drop;
290 286
291 nh = mpls_select_multipath(rt, skb, dec.bos); 287 nh = mpls_select_multipath(rt, skb);
292 if (!nh) 288 if (!nh)
293 goto drop; 289 goto drop;
294 290
@@ -297,6 +293,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
297 if (!mpls_output_possible(out_dev)) 293 if (!mpls_output_possible(out_dev))
298 goto drop; 294 goto drop;
299 295
296 /* Pop the label */
297 skb_pull(skb, sizeof(*hdr));
298 skb_reset_network_header(skb);
299
300 skb_orphan(skb);
301
300 if (skb_warn_if_lro(skb)) 302 if (skb_warn_if_lro(skb))
301 goto drop; 303 goto drop;
302 304
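The af_mpls.c rework above tracks the accumulated header length and walks labels until the bottom-of-stack bit is found, instead of threading a bos flag through the call chain. A rough userspace sketch of the label walk and nexthop selection, assuming a mock shim layout and a simplified mixing function in place of the kernel's jhash:

#include <stdint.h>
#include <stdio.h>

#define MAX_MP_SELECT_LABELS 4

struct shim {                   /* mock MPLS shim entry */
        uint32_t label;
        int bos;                /* bottom of stack */
};

/* Simplified stand-in for the kernel's jhash mixing. */
static uint32_t mix(uint32_t h, uint32_t v)
{
        return h ^ (v + 0x9e3779b9u + (h << 6) + (h >> 2));
}

/* Walk at most MAX_MP_SELECT_LABELS entries, stopping at the bottom of
 * the stack, as the reworked mpls_multipath_hash() does. */
static uint32_t stack_hash(const struct shim *s, int n)
{
        uint32_t hash = 0;
        int i;

        for (i = 0; i < MAX_MP_SELECT_LABELS && i < n; i++) {
                hash = mix(hash, s[i].label);
                if (s[i].bos)
                        break;  /* payload (IPv4/IPv6) hashing goes here */
        }
        return hash;
}

int main(void)
{
        struct shim stack[] = { { 100, 0 }, { 200, 1 } };
        int alive = 3;          /* live nexthops, cf. rt_nhn_alive */

        printf("nh_index = %u\n", stack_hash(stack, 2) % alive);
        return 0;
}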
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 2f7ccd934416..1d281c1ff7c1 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -215,6 +215,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
215 .fill_encap = mpls_fill_encap_info, 215 .fill_encap = mpls_fill_encap_info,
216 .get_encap_size = mpls_encap_nlsize, 216 .get_encap_size = mpls_encap_nlsize,
217 .cmp_encap = mpls_encap_cmp, 217 .cmp_encap = mpls_encap_cmp,
218 .owner = THIS_MODULE,
218}; 219};
219 220
220static int __init mpls_iptunnel_init(void) 221static int __init mpls_iptunnel_init(void)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 63729b489c2c..bbc45f8a7b2d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -494,7 +494,7 @@ config NFT_CT
494 depends on NF_CONNTRACK 494 depends on NF_CONNTRACK
495 tristate "Netfilter nf_tables conntrack module" 495 tristate "Netfilter nf_tables conntrack module"
496 help 496 help
497 This option adds the "meta" expression that you can use to match 497 This option adds the "ct" expression that you can use to match
498 connection tracking information such as the flow state. 498 connection tracking information such as the flow state.
499 499
500config NFT_SET_RBTREE 500config NFT_SET_RBTREE
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3a073cd9fcf4..4e8083c5e01d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
85static __read_mostly bool nf_conntrack_locks_all; 85static __read_mostly bool nf_conntrack_locks_all;
86 86
87/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ 87/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
88#define GC_MAX_BUCKETS_DIV 64u 88#define GC_MAX_BUCKETS_DIV 128u
89/* upper bound of scan intervals */ 89/* upper bound of full table scan */
90#define GC_INTERVAL_MAX (2 * HZ) 90#define GC_MAX_SCAN_JIFFIES (16u * HZ)
91/* maximum conntracks to evict per gc run */ 91/* desired ratio of entries found to be expired */
92#define GC_MAX_EVICTS 256u 92#define GC_EVICT_RATIO 50u
93 93
94static struct conntrack_gc_work conntrack_gc_work; 94static struct conntrack_gc_work conntrack_gc_work;
95 95
@@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
938 938
939static void gc_worker(struct work_struct *work) 939static void gc_worker(struct work_struct *work)
940{ 940{
941 unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
941 unsigned int i, goal, buckets = 0, expired_count = 0; 942 unsigned int i, goal, buckets = 0, expired_count = 0;
942 struct conntrack_gc_work *gc_work; 943 struct conntrack_gc_work *gc_work;
943 unsigned int ratio, scanned = 0; 944 unsigned int ratio, scanned = 0;
@@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work)
979 */ 980 */
980 rcu_read_unlock(); 981 rcu_read_unlock();
981 cond_resched_rcu_qs(); 982 cond_resched_rcu_qs();
982 } while (++buckets < goal && 983 } while (++buckets < goal);
983 expired_count < GC_MAX_EVICTS);
984 984
985 if (gc_work->exiting) 985 if (gc_work->exiting)
986 return; 986 return;
@@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work)
997 * 1. Minimize time until we notice a stale entry 997 * 1. Minimize time until we notice a stale entry
998 * 2. Maximize scan intervals to not waste cycles 998 * 2. Maximize scan intervals to not waste cycles
999 * 999 *
1000 * Normally, expired_count will be 0, this increases the next_run time 1000 * Normally, expire ratio will be close to 0.
1001 * to priorize 2) above.
1002 * 1001 *
1003 * As soon as a timed-out entry is found, move towards 1) and increase 1002 * As soon as a sizeable fraction of the entries have expired,
1004 * the scan frequency. 1003 * increase scan frequency.
1005 * In case we have lots of evictions next scan is done immediately.
1006 */ 1004 */
1007 ratio = scanned ? expired_count * 100 / scanned : 0; 1005 ratio = scanned ? expired_count * 100 / scanned : 0;
1008 if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { 1006 if (ratio > GC_EVICT_RATIO) {
1009 gc_work->next_gc_run = 0; 1007 gc_work->next_gc_run = min_interval;
1010 next_run = 0;
1011 } else if (expired_count) {
1012 gc_work->next_gc_run /= 2U;
1013 next_run = msecs_to_jiffies(1);
1014 } else { 1008 } else {
1015 if (gc_work->next_gc_run < GC_INTERVAL_MAX) 1009 unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
1016 gc_work->next_gc_run += msecs_to_jiffies(1);
1017 1010
1018 next_run = gc_work->next_gc_run; 1011 BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
1012
1013 gc_work->next_gc_run += min_interval;
1014 if (gc_work->next_gc_run > max)
1015 gc_work->next_gc_run = max;
1019 } 1016 }
1020 1017
1018 next_run = gc_work->next_gc_run;
1021 gc_work->last_bucket = i; 1019 gc_work->last_bucket = i;
1022 queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); 1020 queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
1023} 1021}
@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
1025static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) 1023static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1026{ 1024{
1027 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); 1025 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
1028 gc_work->next_gc_run = GC_INTERVAL_MAX; 1026 gc_work->next_gc_run = HZ;
1029 gc_work->exiting = false; 1027 gc_work->exiting = false;
1030} 1028}
1031 1029
@@ -1917,7 +1915,7 @@ int nf_conntrack_init_start(void)
1917 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); 1915 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1918 1916
1919 conntrack_gc_work_init(&conntrack_gc_work); 1917 conntrack_gc_work_init(&conntrack_gc_work);
1920 queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); 1918 queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
1921 1919
1922 return 0; 1920 return 0;
1923 1921
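The conntrack change above drops the per-run eviction cap in favour of a ratio-driven reschedule: when a large share of scanned entries turned out to be expired, the next scan is pulled in to the minimum interval; otherwise the interval backs off toward a fixed ceiling. A standalone sketch of that policy, assuming HZ is 250 purely for illustration:

#include <stdio.h>

#define HZ                      250u    /* assumed tick rate */
#define GC_MAX_BUCKETS_DIV      128u
#define GC_MAX_SCAN_JIFFIES     (16u * HZ)
#define GC_EVICT_RATIO          50u

/* Mirror of the rescheduling policy in the reworked gc_worker(). */
static unsigned int next_gc_run(unsigned int prev_run,
                                unsigned int scanned,
                                unsigned int expired)
{
        unsigned int min_interval = HZ / GC_MAX_BUCKETS_DIV;
        unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
        unsigned int ratio = scanned ? expired * 100 / scanned : 0;

        if (!min_interval)
                min_interval = 1;
        if (ratio > GC_EVICT_RATIO)
                return min_interval;    /* many stale entries: rescan soon */

        prev_run += min_interval;       /* mostly-fresh table: back off */
        return prev_run > max ? max : prev_run;
}

int main(void)
{
        printf("busy table: %u jiffies\n", next_gc_run(HZ, 1000, 600));
        printf("idle table: %u jiffies\n", next_gc_run(HZ, 1000, 0));
        return 0;
}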
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 3dca90dc24ad..ffb9e8ada899 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -13,7 +13,6 @@
13/* Internal logging interface, which relies on the real 13/* Internal logging interface, which relies on the real
14 LOG target modules */ 14 LOG target modules */
15 15
16#define NF_LOG_PREFIXLEN 128
17#define NFLOGGER_NAME_LEN 64 16#define NFLOGGER_NAME_LEN 64
18 17
19static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; 18static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 0db5f9782265..1b913760f205 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -928,7 +928,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
928} 928}
929 929
930static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { 930static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
931 [NFTA_CHAIN_TABLE] = { .type = NLA_STRING }, 931 [NFTA_CHAIN_TABLE] = { .type = NLA_STRING,
932 .len = NFT_TABLE_MAXNAMELEN - 1 },
932 [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 }, 933 [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 },
933 [NFTA_CHAIN_NAME] = { .type = NLA_STRING, 934 [NFTA_CHAIN_NAME] = { .type = NLA_STRING,
934 .len = NFT_CHAIN_MAXNAMELEN - 1 }, 935 .len = NFT_CHAIN_MAXNAMELEN - 1 },
@@ -1854,7 +1855,8 @@ static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
1854} 1855}
1855 1856
1856static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { 1857static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
1857 [NFTA_RULE_TABLE] = { .type = NLA_STRING }, 1858 [NFTA_RULE_TABLE] = { .type = NLA_STRING,
1859 .len = NFT_TABLE_MAXNAMELEN - 1 },
1858 [NFTA_RULE_CHAIN] = { .type = NLA_STRING, 1860 [NFTA_RULE_CHAIN] = { .type = NLA_STRING,
1859 .len = NFT_CHAIN_MAXNAMELEN - 1 }, 1861 .len = NFT_CHAIN_MAXNAMELEN - 1 },
1860 [NFTA_RULE_HANDLE] = { .type = NLA_U64 }, 1862 [NFTA_RULE_HANDLE] = { .type = NLA_U64 },
@@ -2443,7 +2445,8 @@ nft_select_set_ops(const struct nlattr * const nla[],
2443} 2445}
2444 2446
2445static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = { 2447static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
2446 [NFTA_SET_TABLE] = { .type = NLA_STRING }, 2448 [NFTA_SET_TABLE] = { .type = NLA_STRING,
2449 .len = NFT_TABLE_MAXNAMELEN - 1 },
2447 [NFTA_SET_NAME] = { .type = NLA_STRING, 2450 [NFTA_SET_NAME] = { .type = NLA_STRING,
2448 .len = NFT_SET_MAXNAMELEN - 1 }, 2451 .len = NFT_SET_MAXNAMELEN - 1 },
2449 [NFTA_SET_FLAGS] = { .type = NLA_U32 }, 2452 [NFTA_SET_FLAGS] = { .type = NLA_U32 },
@@ -3084,9 +3087,9 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
3084} 3087}
3085 3088
3086static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, 3089static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
3087 const struct nft_set *set, 3090 struct nft_set *set,
3088 const struct nft_set_iter *iter, 3091 const struct nft_set_iter *iter,
3089 const struct nft_set_elem *elem) 3092 struct nft_set_elem *elem)
3090{ 3093{
3091 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 3094 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
3092 enum nft_registers dreg; 3095 enum nft_registers dreg;
@@ -3192,8 +3195,10 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
3192}; 3195};
3193 3196
3194static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { 3197static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
3195 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING }, 3198 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING,
3196 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING }, 3199 .len = NFT_TABLE_MAXNAMELEN - 1 },
3200 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING,
3201 .len = NFT_SET_MAXNAMELEN - 1 },
3197 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED }, 3202 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
3198 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, 3203 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
3199}; 3204};
@@ -3303,9 +3308,9 @@ struct nft_set_dump_args {
3303}; 3308};
3304 3309
3305static int nf_tables_dump_setelem(const struct nft_ctx *ctx, 3310static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
3306 const struct nft_set *set, 3311 struct nft_set *set,
3307 const struct nft_set_iter *iter, 3312 const struct nft_set_iter *iter,
3308 const struct nft_set_elem *elem) 3313 struct nft_set_elem *elem)
3309{ 3314{
3310 struct nft_set_dump_args *args; 3315 struct nft_set_dump_args *args;
3311 3316
@@ -3317,7 +3322,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3317{ 3322{
3318 struct net *net = sock_net(skb->sk); 3323 struct net *net = sock_net(skb->sk);
3319 u8 genmask = nft_genmask_cur(net); 3324 u8 genmask = nft_genmask_cur(net);
3320 const struct nft_set *set; 3325 struct nft_set *set;
3321 struct nft_set_dump_args args; 3326 struct nft_set_dump_args args;
3322 struct nft_ctx ctx; 3327 struct nft_ctx ctx;
3323 struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1]; 3328 struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
@@ -3740,10 +3745,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3740 goto err5; 3745 goto err5;
3741 } 3746 }
3742 3747
3748 if (set->size &&
3749 !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
3750 err = -ENFILE;
3751 goto err6;
3752 }
3753
3743 nft_trans_elem(trans) = elem; 3754 nft_trans_elem(trans) = elem;
3744 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 3755 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3745 return 0; 3756 return 0;
3746 3757
3758err6:
3759 set->ops->remove(set, &elem);
3747err5: 3760err5:
3748 kfree(trans); 3761 kfree(trans);
3749err4: 3762err4:
@@ -3790,15 +3803,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
3790 return -EBUSY; 3803 return -EBUSY;
3791 3804
3792 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 3805 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
3793 if (set->size &&
3794 !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
3795 return -ENFILE;
3796
3797 err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags); 3806 err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
3798 if (err < 0) { 3807 if (err < 0)
3799 atomic_dec(&set->nelems);
3800 break; 3808 break;
3801 }
3802 } 3809 }
3803 return err; 3810 return err;
3804} 3811}
@@ -3883,9 +3890,9 @@ err1:
3883} 3890}
3884 3891
3885static int nft_flush_set(const struct nft_ctx *ctx, 3892static int nft_flush_set(const struct nft_ctx *ctx,
3886 const struct nft_set *set, 3893 struct nft_set *set,
3887 const struct nft_set_iter *iter, 3894 const struct nft_set_iter *iter,
3888 const struct nft_set_elem *elem) 3895 struct nft_set_elem *elem)
3889{ 3896{
3890 struct nft_trans *trans; 3897 struct nft_trans *trans;
3891 int err; 3898 int err;
@@ -3899,9 +3906,10 @@ static int nft_flush_set(const struct nft_ctx *ctx,
3899 err = -ENOENT; 3906 err = -ENOENT;
3900 goto err1; 3907 goto err1;
3901 } 3908 }
3909 set->ndeact++;
3902 3910
3903 nft_trans_elem_set(trans) = (struct nft_set *)set; 3911 nft_trans_elem_set(trans) = set;
3904 nft_trans_elem(trans) = *((struct nft_set_elem *)elem); 3912 nft_trans_elem(trans) = *elem;
3905 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 3913 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3906 3914
3907 return 0; 3915 return 0;
@@ -4032,8 +4040,10 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
4032EXPORT_SYMBOL_GPL(nf_tables_obj_lookup); 4040EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
4033 4041
4034static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = { 4042static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
4035 [NFTA_OBJ_TABLE] = { .type = NLA_STRING }, 4043 [NFTA_OBJ_TABLE] = { .type = NLA_STRING,
4036 [NFTA_OBJ_NAME] = { .type = NLA_STRING }, 4044 .len = NFT_TABLE_MAXNAMELEN - 1 },
4045 [NFTA_OBJ_NAME] = { .type = NLA_STRING,
4046 .len = NFT_OBJ_MAXNAMELEN - 1 },
4037 [NFTA_OBJ_TYPE] = { .type = NLA_U32 }, 4047 [NFTA_OBJ_TYPE] = { .type = NLA_U32 },
4038 [NFTA_OBJ_DATA] = { .type = NLA_NESTED }, 4048 [NFTA_OBJ_DATA] = { .type = NLA_NESTED },
4039}; 4049};
@@ -4262,10 +4272,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
4262 if (idx > s_idx) 4272 if (idx > s_idx)
4263 memset(&cb->args[1], 0, 4273 memset(&cb->args[1], 0,
4264 sizeof(cb->args) - sizeof(cb->args[0])); 4274 sizeof(cb->args) - sizeof(cb->args[0]));
4265 if (filter->table[0] && 4275 if (filter && filter->table[0] &&
4266 strcmp(filter->table, table->name)) 4276 strcmp(filter->table, table->name))
4267 goto cont; 4277 goto cont;
4268 if (filter->type != NFT_OBJECT_UNSPEC && 4278 if (filter &&
4279 filter->type != NFT_OBJECT_UNSPEC &&
4269 obj->type->type != filter->type) 4280 obj->type->type != filter->type)
4270 goto cont; 4281 goto cont;
4271 4282
@@ -5009,9 +5020,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
5009 const struct nft_chain *chain); 5020 const struct nft_chain *chain);
5010 5021
5011static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, 5022static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
5012 const struct nft_set *set, 5023 struct nft_set *set,
5013 const struct nft_set_iter *iter, 5024 const struct nft_set_iter *iter,
5014 const struct nft_set_elem *elem) 5025 struct nft_set_elem *elem)
5015{ 5026{
5016 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 5027 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
5017 const struct nft_data *data; 5028 const struct nft_data *data;
@@ -5035,7 +5046,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
5035{ 5046{
5036 const struct nft_rule *rule; 5047 const struct nft_rule *rule;
5037 const struct nft_expr *expr, *last; 5048 const struct nft_expr *expr, *last;
5038 const struct nft_set *set; 5049 struct nft_set *set;
5039 struct nft_set_binding *binding; 5050 struct nft_set_binding *binding;
5040 struct nft_set_iter iter; 5051 struct nft_set_iter iter;
5041 5052
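The recurring policy change in nf_tables_api.c and the nft_* expressions below caps every string attribute with an explicit .len, so oversized table, set, and object names are rejected during attribute parsing instead of being copied unchecked. A hedged mock of that validation; struct attr_policy and validate_string() are stand-ins for the netlink attribute machinery, and the name length is assumed:

#include <stdio.h>
#include <string.h>

#define NFT_TABLE_MAXNAMELEN 32         /* assumed cap for illustration */

enum attr_type { ATTR_STRING };

/* Mock of the relevant nla_policy fields. */
struct attr_policy {
        enum attr_type type;
        unsigned int len;               /* 0 means "no length cap" */
};

/* Reject strings longer than the policy allows, as attribute parsing
 * does once .len is set; an unset .len accepted arbitrarily long names. */
static int validate_string(const struct attr_policy *p, const char *s)
{
        if (p->type != ATTR_STRING)
                return -1;
        if (p->len && strlen(s) > p->len)
                return -1;
        return 0;
}

int main(void)
{
        struct attr_policy table_name = {
                .type = ATTR_STRING,
                .len = NFT_TABLE_MAXNAMELEN - 1,
        };

        printf("short name: %d\n", validate_string(&table_name, "filter"));
        printf("long name:  %d\n", validate_string(&table_name,
               "a-table-name-well-past-thirty-one-characters"));
        return 0;
}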
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 7de2f46734a4..049ad2d9ee66 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -98,7 +98,8 @@ out:
98} 98}
99 99
100static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = { 100static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
101 [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING }, 101 [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING,
102 .len = NFT_SET_MAXNAMELEN - 1 },
102 [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 }, 103 [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 },
103 [NFTA_DYNSET_OP] = { .type = NLA_U32 }, 104 [NFTA_DYNSET_OP] = { .type = NLA_U32 },
104 [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 }, 105 [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 6271e40a3dd6..6f6e64423643 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -39,7 +39,8 @@ static void nft_log_eval(const struct nft_expr *expr,
39 39
40static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = { 40static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
41 [NFTA_LOG_GROUP] = { .type = NLA_U16 }, 41 [NFTA_LOG_GROUP] = { .type = NLA_U16 },
42 [NFTA_LOG_PREFIX] = { .type = NLA_STRING }, 42 [NFTA_LOG_PREFIX] = { .type = NLA_STRING,
43 .len = NF_LOG_PREFIXLEN - 1 },
43 [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 }, 44 [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
44 [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 }, 45 [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
45 [NFTA_LOG_LEVEL] = { .type = NLA_U32 }, 46 [NFTA_LOG_LEVEL] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index d4f97fa7e21d..e21aea7e5ec8 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -49,7 +49,8 @@ static void nft_lookup_eval(const struct nft_expr *expr,
49} 49}
50 50
51static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { 51static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
52 [NFTA_LOOKUP_SET] = { .type = NLA_STRING }, 52 [NFTA_LOOKUP_SET] = { .type = NLA_STRING,
53 .len = NFT_SET_MAXNAMELEN - 1 },
53 [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 }, 54 [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 },
54 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, 55 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 },
55 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 }, 56 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 415a65ba2b85..1ae8c49ca4a1 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -193,10 +193,12 @@ nft_objref_select_ops(const struct nft_ctx *ctx,
193} 193}
194 194
195static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = { 195static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
196 [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING }, 196 [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING,
197 .len = NFT_OBJ_MAXNAMELEN - 1 },
197 [NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 }, 198 [NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 },
198 [NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 }, 199 [NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 },
199 [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING }, 200 [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING,
201 .len = NFT_SET_MAXNAMELEN - 1 },
200 [NFTA_OBJREF_SET_ID] = { .type = NLA_U32 }, 202 [NFTA_OBJREF_SET_ID] = { .type = NLA_U32 },
201}; 203};
202 204
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 1e20e2bbb6d9..e36069fb76ae 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -212,7 +212,7 @@ static void nft_hash_remove(const struct nft_set *set,
212 rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params); 212 rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
213} 213}
214 214
215static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, 215static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
216 struct nft_set_iter *iter) 216 struct nft_set_iter *iter)
217{ 217{
218 struct nft_hash *priv = nft_set_priv(set); 218 struct nft_hash *priv = nft_set_priv(set);
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 08376e50f6cd..f06f55ee516d 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -221,7 +221,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
221} 221}
222 222
223static void nft_rbtree_walk(const struct nft_ctx *ctx, 223static void nft_rbtree_walk(const struct nft_ctx *ctx,
224 const struct nft_set *set, 224 struct nft_set *set,
225 struct nft_set_iter *iter) 225 struct nft_set_iter *iter)
226{ 226{
227 const struct nft_rbtree *priv = nft_set_priv(set); 227 const struct nft_rbtree *priv = nft_set_priv(set);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b9e1a13b4ba3..3d555c79a7b5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1976,7 +1976,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
1976 return -EINVAL; 1976 return -EINVAL;
1977 *len -= sizeof(vnet_hdr); 1977 *len -= sizeof(vnet_hdr);
1978 1978
1979 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le())) 1979 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
1980 return -EINVAL; 1980 return -EINVAL;
1981 1981
1982 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); 1982 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
@@ -2237,7 +2237,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2237 if (po->has_vnet_hdr) { 2237 if (po->has_vnet_hdr) {
2238 if (virtio_net_hdr_from_skb(skb, h.raw + macoff - 2238 if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
2239 sizeof(struct virtio_net_hdr), 2239 sizeof(struct virtio_net_hdr),
2240 vio_le())) { 2240 vio_le(), true)) {
2241 spin_lock(&sk->sk_receive_queue.lock); 2241 spin_lock(&sk->sk_receive_queue.lock);
2242 goto drop_n_account; 2242 goto drop_n_account;
2243 } 2243 }
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 970db7a41684..5752789acc13 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -568,9 +568,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
568 &mask->icmp.type, 568 &mask->icmp.type,
569 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 569 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
570 sizeof(key->icmp.type)); 570 sizeof(key->icmp.type));
571 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 571 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
572 &mask->icmp.code, 572 &mask->icmp.code,
573 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 573 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
574 sizeof(key->icmp.code)); 574 sizeof(key->icmp.code));
575 } 575 }
576 576
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index f935429bd5ef..b12bc2abea93 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -16,16 +16,11 @@
16#include <net/sch_generic.h> 16#include <net/sch_generic.h>
17#include <net/pkt_cls.h> 17#include <net/pkt_cls.h>
18 18
19struct cls_mall_filter { 19struct cls_mall_head {
20 struct tcf_exts exts; 20 struct tcf_exts exts;
21 struct tcf_result res; 21 struct tcf_result res;
22 u32 handle; 22 u32 handle;
23 struct rcu_head rcu;
24 u32 flags; 23 u32 flags;
25};
26
27struct cls_mall_head {
28 struct cls_mall_filter *filter;
29 struct rcu_head rcu; 24 struct rcu_head rcu;
30}; 25};
31 26
@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
33 struct tcf_result *res) 28 struct tcf_result *res)
34{ 29{
35 struct cls_mall_head *head = rcu_dereference_bh(tp->root); 30 struct cls_mall_head *head = rcu_dereference_bh(tp->root);
36 struct cls_mall_filter *f = head->filter;
37 31
38 if (tc_skip_sw(f->flags)) 32 if (tc_skip_sw(head->flags))
39 return -1; 33 return -1;
40 34
41 return tcf_exts_exec(skb, &f->exts, res); 35 return tcf_exts_exec(skb, &head->exts, res);
42} 36}
43 37
44static int mall_init(struct tcf_proto *tp) 38static int mall_init(struct tcf_proto *tp)
45{ 39{
46 struct cls_mall_head *head;
47
48 head = kzalloc(sizeof(*head), GFP_KERNEL);
49 if (!head)
50 return -ENOBUFS;
51
52 rcu_assign_pointer(tp->root, head);
53
54 return 0; 40 return 0;
55} 41}
56 42
57static void mall_destroy_filter(struct rcu_head *head) 43static void mall_destroy_rcu(struct rcu_head *rcu)
58{ 44{
59 struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu); 45 struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
46 rcu);
60 47
61 tcf_exts_destroy(&f->exts); 48 tcf_exts_destroy(&head->exts);
62 49 kfree(head);
63 kfree(f);
64} 50}
65 51
66static int mall_replace_hw_filter(struct tcf_proto *tp, 52static int mall_replace_hw_filter(struct tcf_proto *tp,
67 struct cls_mall_filter *f, 53 struct cls_mall_head *head,
68 unsigned long cookie) 54 unsigned long cookie)
69{ 55{
70 struct net_device *dev = tp->q->dev_queue->dev; 56 struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
74 offload.type = TC_SETUP_MATCHALL; 60 offload.type = TC_SETUP_MATCHALL;
75 offload.cls_mall = &mall_offload; 61 offload.cls_mall = &mall_offload;
76 offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; 62 offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
77 offload.cls_mall->exts = &f->exts; 63 offload.cls_mall->exts = &head->exts;
78 offload.cls_mall->cookie = cookie; 64 offload.cls_mall->cookie = cookie;
79 65
80 return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, 66 return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
82} 68}
83 69
84static void mall_destroy_hw_filter(struct tcf_proto *tp, 70static void mall_destroy_hw_filter(struct tcf_proto *tp,
85 struct cls_mall_filter *f, 71 struct cls_mall_head *head,
86 unsigned long cookie) 72 unsigned long cookie)
87{ 73{
88 struct net_device *dev = tp->q->dev_queue->dev; 74 struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
103{ 89{
104 struct cls_mall_head *head = rtnl_dereference(tp->root); 90 struct cls_mall_head *head = rtnl_dereference(tp->root);
105 struct net_device *dev = tp->q->dev_queue->dev; 91 struct net_device *dev = tp->q->dev_queue->dev;
106 struct cls_mall_filter *f = head->filter;
107 92
108 if (!force && f) 93 if (!head)
109 return false; 94 return true;
110 95
111 if (f) { 96 if (tc_should_offload(dev, tp, head->flags))
112 if (tc_should_offload(dev, tp, f->flags)) 97 mall_destroy_hw_filter(tp, head, (unsigned long) head);
113 mall_destroy_hw_filter(tp, f, (unsigned long) f);
114 98
115 call_rcu(&f->rcu, mall_destroy_filter); 99 call_rcu(&head->rcu, mall_destroy_rcu);
116 }
117 kfree_rcu(head, rcu);
118 return true; 100 return true;
119} 101}
120 102
121static unsigned long mall_get(struct tcf_proto *tp, u32 handle) 103static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
122{ 104{
123 struct cls_mall_head *head = rtnl_dereference(tp->root); 105 return 0UL;
124 struct cls_mall_filter *f = head->filter;
125
126 if (f && f->handle == handle)
127 return (unsigned long) f;
128 return 0;
129} 106}
130 107
131static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { 108static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
134}; 111};
135 112
136static int mall_set_parms(struct net *net, struct tcf_proto *tp, 113static int mall_set_parms(struct net *net, struct tcf_proto *tp,
137 struct cls_mall_filter *f, 114 struct cls_mall_head *head,
138 unsigned long base, struct nlattr **tb, 115 unsigned long base, struct nlattr **tb,
139 struct nlattr *est, bool ovr) 116 struct nlattr *est, bool ovr)
140{ 117{
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
147 return err; 124 return err;
148 125
149 if (tb[TCA_MATCHALL_CLASSID]) { 126 if (tb[TCA_MATCHALL_CLASSID]) {
150 f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); 127 head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
151 tcf_bind_filter(tp, &f->res, base); 128 tcf_bind_filter(tp, &head->res, base);
152 } 129 }
153 130
154 tcf_exts_change(tp, &f->exts, &e); 131 tcf_exts_change(tp, &head->exts, &e);
155 132
156 return 0; 133 return 0;
157} 134}
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
162 unsigned long *arg, bool ovr) 139 unsigned long *arg, bool ovr)
163{ 140{
164 struct cls_mall_head *head = rtnl_dereference(tp->root); 141 struct cls_mall_head *head = rtnl_dereference(tp->root);
165 struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
166 struct net_device *dev = tp->q->dev_queue->dev; 142 struct net_device *dev = tp->q->dev_queue->dev;
167 struct cls_mall_filter *f;
168 struct nlattr *tb[TCA_MATCHALL_MAX + 1]; 143 struct nlattr *tb[TCA_MATCHALL_MAX + 1];
144 struct cls_mall_head *new;
169 u32 flags = 0; 145 u32 flags = 0;
170 int err; 146 int err;
171 147
172 if (!tca[TCA_OPTIONS]) 148 if (!tca[TCA_OPTIONS])
173 return -EINVAL; 149 return -EINVAL;
174 150
175 if (head->filter) 151 if (head)
176 return -EBUSY; 152 return -EEXIST;
177
178 if (fold)
179 return -EINVAL;
180 153
181 err = nla_parse_nested(tb, TCA_MATCHALL_MAX, 154 err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
182 tca[TCA_OPTIONS], mall_policy); 155 tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
189 return -EINVAL; 162 return -EINVAL;
190 } 163 }
191 164
192 f = kzalloc(sizeof(*f), GFP_KERNEL); 165 new = kzalloc(sizeof(*new), GFP_KERNEL);
193 if (!f) 166 if (!new)
194 return -ENOBUFS; 167 return -ENOBUFS;
195 168
196 tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0); 169 tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
197 170
198 if (!handle) 171 if (!handle)
199 handle = 1; 172 handle = 1;
200 f->handle = handle; 173 new->handle = handle;
201 f->flags = flags; 174 new->flags = flags;
202 175
203 err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); 176 err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
204 if (err) 177 if (err)
205 goto errout; 178 goto errout;
206 179
207 if (tc_should_offload(dev, tp, flags)) { 180 if (tc_should_offload(dev, tp, flags)) {
208 err = mall_replace_hw_filter(tp, f, (unsigned long) f); 181 err = mall_replace_hw_filter(tp, new, (unsigned long) new);
209 if (err) { 182 if (err) {
210 if (tc_skip_sw(flags)) 183 if (tc_skip_sw(flags))
211 goto errout; 184 goto errout;
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
214 } 187 }
215 } 188 }
216 189
217 *arg = (unsigned long) f; 190 *arg = (unsigned long) head;
218 rcu_assign_pointer(head->filter, f); 191 rcu_assign_pointer(tp->root, new);
219 192 if (head)
193 call_rcu(&head->rcu, mall_destroy_rcu);
220 return 0; 194 return 0;
221 195
222errout: 196errout:
223 kfree(f); 197 kfree(new);
224 return err; 198 return err;
225} 199}
226 200
227static int mall_delete(struct tcf_proto *tp, unsigned long arg) 201static int mall_delete(struct tcf_proto *tp, unsigned long arg)
228{ 202{
229 struct cls_mall_head *head = rtnl_dereference(tp->root); 203 return -EOPNOTSUPP;
230 struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
231 struct net_device *dev = tp->q->dev_queue->dev;
232
233 if (tc_should_offload(dev, tp, f->flags))
234 mall_destroy_hw_filter(tp, f, (unsigned long) f);
235
236 RCU_INIT_POINTER(head->filter, NULL);
237 tcf_unbind_filter(tp, &f->res);
238 call_rcu(&f->rcu, mall_destroy_filter);
239 return 0;
240} 204}
241 205
242static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg) 206static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
243{ 207{
244 struct cls_mall_head *head = rtnl_dereference(tp->root); 208 struct cls_mall_head *head = rtnl_dereference(tp->root);
245 struct cls_mall_filter *f = head->filter;
246 209
247 if (arg->count < arg->skip) 210 if (arg->count < arg->skip)
248 goto skip; 211 goto skip;
249 if (arg->fn(tp, (unsigned long) f, arg) < 0) 212 if (arg->fn(tp, (unsigned long) head, arg) < 0)
250 arg->stop = 1; 213 arg->stop = 1;
251skip: 214skip:
252 arg->count++; 215 arg->count++;
@@ -255,28 +218,28 @@ skip:
255static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, 218static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
256 struct sk_buff *skb, struct tcmsg *t) 219 struct sk_buff *skb, struct tcmsg *t)
257{ 220{
258 struct cls_mall_filter *f = (struct cls_mall_filter *) fh; 221 struct cls_mall_head *head = (struct cls_mall_head *) fh;
259 struct nlattr *nest; 222 struct nlattr *nest;
260 223
261 if (!f) 224 if (!head)
262 return skb->len; 225 return skb->len;
263 226
264 t->tcm_handle = f->handle; 227 t->tcm_handle = head->handle;
265 228
266 nest = nla_nest_start(skb, TCA_OPTIONS); 229 nest = nla_nest_start(skb, TCA_OPTIONS);
267 if (!nest) 230 if (!nest)
268 goto nla_put_failure; 231 goto nla_put_failure;
269 232
270 if (f->res.classid && 233 if (head->res.classid &&
271 nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid)) 234 nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
272 goto nla_put_failure; 235 goto nla_put_failure;
273 236
274 if (tcf_exts_dump(skb, &f->exts)) 237 if (tcf_exts_dump(skb, &head->exts))
275 goto nla_put_failure; 238 goto nla_put_failure;
276 239
277 nla_nest_end(skb, nest); 240 nla_nest_end(skb, nest);
278 241
279 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 242 if (tcf_exts_dump_stats(skb, &head->exts) < 0)
280 goto nla_put_failure; 243 goto nla_put_failure;
281 244
282 return skb->len; 245 return skb->len;
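cls_matchall now keeps a single head published through tp->root and replaces it wholesale: install the new head first, then defer freeing the old one until no reader can still see it. The sketch below shows only that ordering; direct assignment and free() stand in for rcu_assign_pointer() and call_rcu(), which is safe here only because there are no concurrent readers:

#include <stdio.h>
#include <stdlib.h>

struct head {
        unsigned int handle;
};

static struct head *root;       /* plays the role of tp->root */

static void publish(struct head *new)
{
        struct head *old = root;

        root = new;             /* rcu_assign_pointer(tp->root, new) */
        free(old);              /* call_rcu(&old->rcu, mall_destroy_rcu) */
}

int main(void)
{
        struct head *h = calloc(1, sizeof(*h));

        if (!h)
                return 1;
        h->handle = 1;
        publish(h);             /* replaces (and frees) any previous head */
        printf("handle %u\n", root->handle);
        publish(NULL);          /* teardown */
        return 0;
}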
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 5ed8e79bf102..64dfd35ccdcc 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
222 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); 222 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
223 223
224 rcu_read_lock(); 224 rcu_read_lock();
225 res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass); 225 res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
226 np->tclass);
226 rcu_read_unlock(); 227 rcu_read_unlock();
227 return res; 228 return res;
228} 229}
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 7e869d0cca69..4f5a2b580aa5 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
68 goto out; 68 goto out;
69 } 69 }
70 70
71 segs = skb_segment(skb, features | NETIF_F_HW_CSUM); 71 segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
72 if (IS_ERR(segs)) 72 if (IS_ERR(segs))
73 goto out; 73 goto out;
74 74
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 318c6786d653..37eeab7899fc 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
235 sctp_assoc_t id) 235 sctp_assoc_t id)
236{ 236{
237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; 237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
238 struct sctp_transport *transport; 238 struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
239 union sctp_addr *laddr = (union sctp_addr *)addr; 239 union sctp_addr *laddr = (union sctp_addr *)addr;
240 struct sctp_transport *transport;
241
242 if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
243 return NULL;
240 244
241 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, 245 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
242 laddr, 246 laddr,
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index dc6fb79a361f..25d9a9cf7b66 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
260 if (!oa->data) 260 if (!oa->data)
261 return -ENOMEM; 261 return -ENOMEM;
262 262
263 creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); 263 creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
264 if (!creds) { 264 if (!creds) {
265 kfree(oa->data); 265 kfree(oa->data);
266 return -ENOMEM; 266 return -ENOMEM;
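The gss_rpc_xdr.c change above swaps kmalloc() for kzalloc() so any field the decoder never writes reads as zero instead of heap garbage. The userspace analogue is calloc() versus malloc():

#include <stdlib.h>

struct cred {                   /* illustrative, not the kernel svc_cred */
        char *principal;
        unsigned int flags;
};

int main(void)
{
        /* malloc() (like kmalloc()) leaves the struct uninitialized:
         * bad->principal is indeterminate until someone sets it. */
        struct cred *bad = malloc(sizeof(*bad));

        /* calloc() (like kzalloc()) hands back zeroed memory, so
         * good->principal is a well-defined NULL. */
        struct cred *good = calloc(1, sizeof(*good));

        free(bad);
        free(good);
        return 0;
}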
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 1efbe48e794f..1dc9f3bac099 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -336,6 +336,11 @@ out:
336 336
337static DEFINE_IDA(rpc_clids); 337static DEFINE_IDA(rpc_clids);
338 338
339void rpc_cleanup_clids(void)
340{
341 ida_destroy(&rpc_clids);
342}
343
339static int rpc_alloc_clid(struct rpc_clnt *clnt) 344static int rpc_alloc_clid(struct rpc_clnt *clnt)
340{ 345{
341 int clid; 346 int clid;
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index d1c330a7953a..c73de181467a 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -119,6 +119,7 @@ out:
119static void __exit 119static void __exit
120cleanup_sunrpc(void) 120cleanup_sunrpc(void)
121{ 121{
122 rpc_cleanup_clids();
122 rpcauth_remove_module(); 123 rpcauth_remove_module();
123 cleanup_socket_xprt(); 124 cleanup_socket_xprt();
124 svc_cleanup_xprt_sock(); 125 svc_cleanup_xprt_sock();
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9d2f4c2b08ab..27753325e06e 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n)
263 write_lock_bh(&n->lock); 263 write_lock_bh(&n->lock);
264} 264}
265 265
266static void tipc_node_write_unlock_fast(struct tipc_node *n)
267{
268 write_unlock_bh(&n->lock);
269}
270
266static void tipc_node_write_unlock(struct tipc_node *n) 271static void tipc_node_write_unlock(struct tipc_node *n)
267{ 272{
268 struct net *net = n->net; 273 struct net *net = n->net;
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
417 } 422 }
418 tipc_node_write_lock(n); 423 tipc_node_write_lock(n);
419 list_add_tail(subscr, &n->publ_list); 424 list_add_tail(subscr, &n->publ_list);
420 tipc_node_write_unlock(n); 425 tipc_node_write_unlock_fast(n);
421 tipc_node_put(n); 426 tipc_node_put(n);
422} 427}
423 428
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
435 } 440 }
436 tipc_node_write_lock(n); 441 tipc_node_write_lock(n);
437 list_del_init(subscr); 442 list_del_init(subscr);
438 tipc_node_write_unlock(n); 443 tipc_node_write_unlock_fast(n);
439 tipc_node_put(n); 444 tipc_node_put(n);
440} 445}
441 446
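tipc_node_write_unlock_fast() above drops the node write lock without the peer-notification work the full unlock performs, which the subscribe/unsubscribe paths don't need. A hedged pthread sketch of the two-variant unlock idiom; notify_peers() is a placeholder for the distribution work the real tipc_node_write_unlock() does:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for the name-table distribution performed after a
 * state-changing unlock. */
static void notify_peers(void)
{
        printf("notifying peers\n");
}

/* Full unlock: publish side effects accumulated under the lock. */
static void node_write_unlock(void)
{
        pthread_mutex_unlock(&node_lock);
        notify_peers();
}

/* Fast unlock: nothing peers care about changed, so skip the work. */
static void node_write_unlock_fast(void)
{
        pthread_mutex_unlock(&node_lock);
}

int main(void)
{
        pthread_mutex_lock(&node_lock);
        node_write_unlock_fast();       /* e.g. only publ_list was edited */

        pthread_mutex_lock(&node_lock);
        node_write_unlock();            /* link state changed */
        return 0;
}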
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 215849ce453d..3cd6402e812c 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -86,12 +86,12 @@ struct outqueue_entry {
86static void tipc_recv_work(struct work_struct *work); 86static void tipc_recv_work(struct work_struct *work);
87static void tipc_send_work(struct work_struct *work); 87static void tipc_send_work(struct work_struct *work);
88static void tipc_clean_outqueues(struct tipc_conn *con); 88static void tipc_clean_outqueues(struct tipc_conn *con);
89static void tipc_sock_release(struct tipc_conn *con);
90 89
91static void tipc_conn_kref_release(struct kref *kref) 90static void tipc_conn_kref_release(struct kref *kref)
92{ 91{
93 struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); 92 struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
94 struct sockaddr_tipc *saddr = con->server->saddr; 93 struct tipc_server *s = con->server;
94 struct sockaddr_tipc *saddr = s->saddr;
95 struct socket *sock = con->sock; 95 struct socket *sock = con->sock;
96 struct sock *sk; 96 struct sock *sk;
97 97
@@ -103,9 +103,13 @@ static void tipc_conn_kref_release(struct kref *kref)
103 } 103 }
104 saddr->scope = -TIPC_NODE_SCOPE; 104 saddr->scope = -TIPC_NODE_SCOPE;
105 kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); 105 kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
106 tipc_sock_release(con);
107 sock_release(sock); 106 sock_release(sock);
108 con->sock = NULL; 107 con->sock = NULL;
108
109 spin_lock_bh(&s->idr_lock);
110 idr_remove(&s->conn_idr, con->conid);
111 s->idr_in_use--;
112 spin_unlock_bh(&s->idr_lock);
109 } 113 }
110 114
111 tipc_clean_outqueues(con); 115 tipc_clean_outqueues(con);
@@ -128,8 +132,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
128 132
129 spin_lock_bh(&s->idr_lock); 133 spin_lock_bh(&s->idr_lock);
130 con = idr_find(&s->conn_idr, conid); 134 con = idr_find(&s->conn_idr, conid);
131 if (con) 135 if (con && test_bit(CF_CONNECTED, &con->flags))
132 conn_get(con); 136 conn_get(con);
137 else
138 con = NULL;
133 spin_unlock_bh(&s->idr_lock); 139 spin_unlock_bh(&s->idr_lock);
134 return con; 140 return con;
135} 141}
@@ -186,26 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
186 write_unlock_bh(&sk->sk_callback_lock); 192 write_unlock_bh(&sk->sk_callback_lock);
187} 193}
188 194
189static void tipc_sock_release(struct tipc_conn *con)
190{
191 struct tipc_server *s = con->server;
192
193 if (con->conid)
194 s->tipc_conn_release(con->conid, con->usr_data);
195
196 tipc_unregister_callbacks(con);
197}
198
199static void tipc_close_conn(struct tipc_conn *con) 195static void tipc_close_conn(struct tipc_conn *con)
200{ 196{
201 struct tipc_server *s = con->server; 197 struct tipc_server *s = con->server;
202 198
203 if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { 199 if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
200 tipc_unregister_callbacks(con);
204 201
205 spin_lock_bh(&s->idr_lock); 202 if (con->conid)
206 idr_remove(&s->conn_idr, con->conid); 203 s->tipc_conn_release(con->conid, con->usr_data);
207 s->idr_in_use--;
208 spin_unlock_bh(&s->idr_lock);
209 204
210 /* We shouldn't flush pending works as we may be in the 205 /* We shouldn't flush pending works as we may be in the
211 * thread. In fact the races with pending rx/tx work structs 206 * thread. In fact the races with pending rx/tx work structs
@@ -458,6 +453,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
458 if (!con) 453 if (!con)
459 return -EINVAL; 454 return -EINVAL;
460 455
456 if (!test_bit(CF_CONNECTED, &con->flags)) {
457 conn_put(con);
458 return 0;
459 }
460
461 e = tipc_alloc_entry(data, len); 461 e = tipc_alloc_entry(data, len);
462 if (!e) { 462 if (!e) {
463 conn_put(con); 463 conn_put(con);
@@ -471,12 +471,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
471 list_add_tail(&e->list, &con->outqueue); 471 list_add_tail(&e->list, &con->outqueue);
472 spin_unlock_bh(&con->outqueue_lock); 472 spin_unlock_bh(&con->outqueue_lock);
473 473
474 if (test_bit(CF_CONNECTED, &con->flags)) { 474 if (!queue_work(s->send_wq, &con->swork))
475 if (!queue_work(s->send_wq, &con->swork))
476 conn_put(con);
477 } else {
478 conn_put(con); 475 conn_put(con);
479 }
480 return 0; 476 return 0;
481} 477}
482 478
@@ -500,7 +496,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
500 int ret; 496 int ret;
501 497
502 spin_lock_bh(&con->outqueue_lock); 498 spin_lock_bh(&con->outqueue_lock);
503 while (1) { 499 while (test_bit(CF_CONNECTED, &con->flags)) {
504 e = list_entry(con->outqueue.next, struct outqueue_entry, 500 e = list_entry(con->outqueue.next, struct outqueue_entry,
505 list); 501 list);
506 if ((struct list_head *) e == &con->outqueue) 502 if ((struct list_head *) e == &con->outqueue)
@@ -623,14 +619,12 @@ int tipc_server_start(struct tipc_server *s)
623void tipc_server_stop(struct tipc_server *s) 619void tipc_server_stop(struct tipc_server *s)
624{ 620{
625 struct tipc_conn *con; 621 struct tipc_conn *con;
626 int total = 0;
627 int id; 622 int id;
628 623
629 spin_lock_bh(&s->idr_lock); 624 spin_lock_bh(&s->idr_lock);
630 for (id = 0; total < s->idr_in_use; id++) { 625 for (id = 0; s->idr_in_use; id++) {
631 con = idr_find(&s->conn_idr, id); 626 con = idr_find(&s->conn_idr, id);
632 if (con) { 627 if (con) {
633 total++;
634 spin_unlock_bh(&s->idr_lock); 628 spin_unlock_bh(&s->idr_lock);
635 tipc_close_conn(con); 629 tipc_close_conn(con);
636 spin_lock_bh(&s->idr_lock); 630 spin_lock_bh(&s->idr_lock);
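The server changes above make connection teardown idempotent: the idr bookkeeping moves into the final kref release, and lookups hand out a reference only while CF_CONNECTED is still set, under the same idr lock the close path takes. A simplified sketch with a plain mutex and a boolean in place of the flag bit:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
        bool connected;         /* stands in for CF_CONNECTED */
        int refs;
};

static pthread_mutex_t idr_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take a reference only while the connection is still usable, as the
 * patched tipc_conn_lookup() does. */
static struct conn *conn_lookup(struct conn *c)
{
        pthread_mutex_lock(&idr_lock);
        if (c && c->connected)
                c->refs++;
        else
                c = NULL;       /* closing or closed: don't hand it out */
        pthread_mutex_unlock(&idr_lock);
        return c;
}

int main(void)
{
        struct conn c = { .connected = true, .refs = 1 };

        printf("%s\n", conn_lookup(&c) ? "got ref" : "gone");
        c.connected = false;    /* tipc_close_conn() clears the bit */
        printf("%s\n", conn_lookup(&c) ? "got ref" : "gone");
        return 0;
}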
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0dd02244e21d..9d94e65d0894 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -54,6 +54,8 @@ struct tipc_subscriber {
54 54
55static void tipc_subscrp_delete(struct tipc_subscription *sub); 55static void tipc_subscrp_delete(struct tipc_subscription *sub);
56static void tipc_subscrb_put(struct tipc_subscriber *subscriber); 56static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
57static void tipc_subscrp_put(struct tipc_subscription *subscription);
58static void tipc_subscrp_get(struct tipc_subscription *subscription);
57 59
58/** 60/**
59 * htohl - convert value to endianness used by destination 61 * htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
123{ 125{
124 struct tipc_name_seq seq; 126 struct tipc_name_seq seq;
125 127
128 tipc_subscrp_get(sub);
126 tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq); 129 tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
127 if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper)) 130 if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
128 return; 131 return;
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
132 135
133 tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref, 136 tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
134 node); 137 node);
138 tipc_subscrp_put(sub);
135} 139}
136 140
137static void tipc_subscrp_timeout(unsigned long data) 141static void tipc_subscrp_timeout(unsigned long data)
138{ 142{
139 struct tipc_subscription *sub = (struct tipc_subscription *)data; 143 struct tipc_subscription *sub = (struct tipc_subscription *)data;
140 struct tipc_subscriber *subscriber = sub->subscriber;
141 144
142 /* Notify subscriber of timeout */ 145 /* Notify subscriber of timeout */
143 tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, 146 tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
144 TIPC_SUBSCR_TIMEOUT, 0, 0); 147 TIPC_SUBSCR_TIMEOUT, 0, 0);
145 148
146 spin_lock_bh(&subscriber->lock); 149 tipc_subscrp_put(sub);
147 tipc_subscrp_delete(sub);
148 spin_unlock_bh(&subscriber->lock);
149
150 tipc_subscrb_put(subscriber);
151} 150}
152 151
153static void tipc_subscrb_kref_release(struct kref *kref) 152static void tipc_subscrb_kref_release(struct kref *kref)
154{ 153{
155 struct tipc_subscriber *subcriber = container_of(kref, 154 kfree(container_of(kref, struct tipc_subscriber, kref));
156 struct tipc_subscriber, kref);
157
158 kfree(subcriber);
159} 155}
160 156
161static void tipc_subscrb_put(struct tipc_subscriber *subscriber) 157static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
168 kref_get(&subscriber->kref); 164 kref_get(&subscriber->kref);
169} 165}
170 166
167static void tipc_subscrp_kref_release(struct kref *kref)
168{
169 struct tipc_subscription *sub = container_of(kref,
170 struct tipc_subscription,
171 kref);
172 struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
173 struct tipc_subscriber *subscriber = sub->subscriber;
174
175 spin_lock_bh(&subscriber->lock);
176 tipc_nametbl_unsubscribe(sub);
177 list_del(&sub->subscrp_list);
178 atomic_dec(&tn->subscription_count);
179 spin_unlock_bh(&subscriber->lock);
180 kfree(sub);
181 tipc_subscrb_put(subscriber);
182}
183
184static void tipc_subscrp_put(struct tipc_subscription *subscription)
185{
186 kref_put(&subscription->kref, tipc_subscrp_kref_release);
187}
188
189static void tipc_subscrp_get(struct tipc_subscription *subscription)
190{
191 kref_get(&subscription->kref);
192}
193
194/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
195 * subscriptions for a given subscriber.
196 */
197static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
198 struct tipc_subscr *s)
199{
200 struct list_head *subscription_list = &subscriber->subscrp_list;
201 struct tipc_subscription *sub, *temp;
202
203 spin_lock_bh(&subscriber->lock);
204 list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
205 if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
206 continue;
207
208 tipc_subscrp_get(sub);
209 spin_unlock_bh(&subscriber->lock);
210 tipc_subscrp_delete(sub);
211 tipc_subscrp_put(sub);
212 spin_lock_bh(&subscriber->lock);
+
+		if (s)
+			break;
+	}
+	spin_unlock_bh(&subscriber->lock);
+}
+
 static struct tipc_subscriber *tipc_subscrb_create(int conid)
 {
 	struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 		pr_warn("Subscriber rejected, no memory\n");
 		return NULL;
 	}
-	kref_init(&subscriber->kref);
 	INIT_LIST_HEAD(&subscriber->subscrp_list);
+	kref_init(&subscriber->kref);
 	subscriber->conid = conid;
 	spin_lock_init(&subscriber->lock);
 
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 
 static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
 {
-	struct tipc_subscription *sub, *temp;
-	u32 timeout;
-
-	spin_lock_bh(&subscriber->lock);
-	/* Destroy any existing subscriptions for subscriber */
-	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-				 subscrp_list) {
-		timeout = htohl(sub->evt.s.timeout, sub->swap);
-		if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
-			tipc_subscrp_delete(sub);
-			tipc_subscrb_put(subscriber);
-		}
-	}
-	spin_unlock_bh(&subscriber->lock);
-
+	tipc_subscrb_subscrp_delete(subscriber, NULL);
 	tipc_subscrb_put(subscriber);
 }
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub)
 {
-	struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+	u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
 
-	tipc_nametbl_unsubscribe(sub);
-	list_del(&sub->subscrp_list);
-	kfree(sub);
-	atomic_dec(&tn->subscription_count);
+	if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+		tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_cancel(struct tipc_subscr *s,
 				struct tipc_subscriber *subscriber)
 {
-	struct tipc_subscription *sub, *temp;
-	u32 timeout;
-
-	spin_lock_bh(&subscriber->lock);
-	/* Find first matching subscription, exit if not found */
-	list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-				 subscrp_list) {
-		if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-			timeout = htohl(sub->evt.s.timeout, sub->swap);
-			if ((timeout == TIPC_WAIT_FOREVER) ||
-			    del_timer(&sub->timer)) {
-				tipc_subscrp_delete(sub);
-				tipc_subscrb_put(subscriber);
-			}
-			break;
-		}
-	}
-	spin_unlock_bh(&subscriber->lock);
+	tipc_subscrb_subscrp_delete(subscriber, s);
 }
 
 static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
 	sub->swap = swap;
 	memcpy(&sub->evt.s, s, sizeof(*s));
 	atomic_inc(&tn->subscription_count);
+	kref_init(&sub->kref);
 	return sub;
 }
 
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 
 	spin_lock_bh(&subscriber->lock);
 	list_add(&sub->subscrp_list, &subscriber->subscrp_list);
-	tipc_subscrb_get(subscriber);
 	sub->subscriber = subscriber;
 	tipc_nametbl_subscribe(sub);
+	tipc_subscrb_get(subscriber);
 	spin_unlock_bh(&subscriber->lock);
 
+	setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
 	timeout = htohl(sub->evt.s.timeout, swap);
-	if (timeout == TIPC_WAIT_FOREVER)
-		return;
 
-	setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
-	mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+	if (timeout != TIPC_WAIT_FOREVER)
+		mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
 }
 
 /* Handle one termination request for the subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index be60103082c9..ffdc214c117a 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -57,6 +57,7 @@ struct tipc_subscriber;
  * @evt: template for events generated by subscription
  */
 struct tipc_subscription {
+	struct kref kref;
 	struct tipc_subscriber *subscriber;
 	struct net *net;
 	struct timer_list timer;
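
The TIPC hunks above convert tipc_subscription to reference counting: a struct kref is embedded in the object, tipc_subscrp_create() initialises it, and the teardown paths drop references instead of freeing directly (the matching tipc_subscrp_get/put helpers are defined earlier in the patch, outside these hunks). As a reminder of the generic pattern being adopted, here is a minimal sketch; the demo_* names are hypothetical, while kref_init/kref_get/kref_put and container_of are real kernel API:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref kref;	/* creator holds the initial reference */
	int payload;
};

static void demo_obj_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	kfree(obj);		/* runs only when the last reference drops */
}

static struct demo_obj *demo_obj_create(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	kref_init(&obj->kref);	/* refcount starts at 1 */
	return obj;
}

static void demo_obj_get(struct demo_obj *obj)
{
	kref_get(&obj->kref);
}

static void demo_obj_put(struct demo_obj *obj)
{
	kref_put(&obj->kref, demo_obj_release);
}

The payoff is visible in the new tipc_subscrb_delete() above: rather than reasoning about which caller might still hold the subscription, every path simply drops its own reference, and the release callback frees the object exactly once.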
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 127656ebe7be..cef79873b09d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	unsigned int hash;
 	struct unix_address *addr;
 	struct hlist_head *list;
+	struct path path = { NULL, NULL };
 
 	err = -EINVAL;
 	if (sunaddr->sun_family != AF_UNIX)
@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		goto out;
 	addr_len = err;
 
+	if (sun_path[0]) {
+		umode_t mode = S_IFSOCK |
+		       (SOCK_INODE(sock)->i_mode & ~current_umask());
+		err = unix_mknod(sun_path, mode, &path);
+		if (err) {
+			if (err == -EEXIST)
+				err = -EADDRINUSE;
+			goto out;
+		}
+	}
+
 	err = mutex_lock_interruptible(&u->bindlock);
 	if (err)
-		goto out;
+		goto out_put;
 
 	err = -EINVAL;
 	if (u->addr)
@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	atomic_set(&addr->refcnt, 1);
 
 	if (sun_path[0]) {
-		struct path path;
-		umode_t mode = S_IFSOCK |
-		       (SOCK_INODE(sock)->i_mode & ~current_umask());
-		err = unix_mknod(sun_path, mode, &path);
-		if (err) {
-			if (err == -EEXIST)
-				err = -EADDRINUSE;
-			unix_release_addr(addr);
-			goto out_up;
-		}
 		addr->hash = UNIX_HASH_SIZE;
 		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
 		spin_lock(&unix_table_lock);
@@ -1065,6 +1067,9 @@ out_unlock:
 	spin_unlock(&unix_table_lock);
 out_up:
 	mutex_unlock(&u->bindlock);
+out_put:
+	if (err)
+		path_put(&path);
 out:
 	return err;
 }
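
Taken together, the unix_bind() hunks are an ordering fix: unix_mknod() now runs before u->bindlock is acquired, so the VFS locks it takes no longer nest inside the socket's bind lock, and the struct path it pins is released on the error path. A condensed restatement of the new control flow, assembled from the hunks above (not compilable on its own; all identifiers come from the patch, and mode is computed as shown in the second hunk):

	struct path path = { NULL, NULL };

	if (sun_path[0]) {
		/* filesystem work first, with no socket lock held */
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			goto out;
		}
	}

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_put;
	/* ... hash the inode and publish the address under the lock ... */
	mutex_unlock(&u->bindlock);
out_put:
	if (err)
		path_put(&path);	/* drop the pinned dentry/mount on failure */
out:
	return err;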
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
index 92a44729dbe4..7ef2a12b25b2 100644
--- a/samples/bpf/tc_l2_redirect_kern.c
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -4,6 +4,7 @@
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  */
+#define KBUILD_MODNAME "foo"
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/if_ether.h>
 #include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c
index 85c38ecd3a2d..0f4f6e8c8611 100644
--- a/samples/bpf/xdp_tx_iptunnel_kern.c
+++ b/samples/bpf/xdp_tx_iptunnel_kern.c
@@ -8,6 +8,7 @@
  * encapsulating the incoming packet in an IPv4/v6 header
  * and then XDP_TX it out.
  */
+#define KBUILD_MODNAME "foo"
 #include <uapi/linux/bpf.h>
 #include <linux/in.h>
 #include <linux/if_ether.h>
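
Both samples get the same one-line fix. kbuild normally supplies -DKBUILD_MODNAME="..." on the compiler command line, but these BPF programs are compiled by clang outside that machinery, so any included kernel header whose macros expand KBUILD_MODNAME (the printk/dynamic-debug helpers pulled in via the networking headers, for instance) fails to build. Defining it by hand before the first include is the workaround; the value itself is arbitrary. A minimal sketch with a shortened include list:

/* Must come before any kernel header: some of them expand
 * KBUILD_MODNAME, which kbuild normally defines on the compiler
 * command line but a bare "clang -target bpf" build does not.
 * The name is arbitrary.
 */
#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/if_ether.h>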
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index eadcd4d359d9..d883116ebaa4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -164,6 +164,7 @@ cmd_gensymtypes_c = \
     $(CPP) -D__GENKSYMS__ $(c_flags) $< | \
     $(GENKSYMS) $(if $(1), -T $(2)) \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
      $(if $(KBUILD_PRESERVE),-p) \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
@@ -337,6 +338,7 @@ cmd_gensymtypes_S = \
     $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \
     $(GENKSYMS) $(if $(1), -T $(2)) \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
      $(if $(KBUILD_PRESERVE),-p) \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index 06121ce524a7..c9235d8340f1 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -44,7 +44,7 @@ char *cur_filename, *source_file;
 int in_source_file;
 
 static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
-	   flag_preserve, flag_warnings;
+	   flag_preserve, flag_warnings, flag_rel_crcs;
 static const char *mod_prefix = "";
 
 static int errors;
@@ -693,7 +693,10 @@ void export_symbol(const char *name)
 			fputs(">\n", debugfile);
 
 		/* Used as a linker script. */
-		printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc);
+		printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" :
+		       "SECTIONS { .rodata : ALIGN(4) { "
+		       "%s__crc_%s = .; LONG(0x%08lx); } }\n",
+		       mod_prefix, name, crc);
 	}
 }
 
@@ -730,7 +733,7 @@ void error_with_pos(const char *fmt, ...)
 
 static void genksyms_usage(void)
 {
-	fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n"
+	fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
 #ifdef __GNU_LIBRARY__
 	      "  -s, --symbol-prefix   Select symbol prefix\n"
 	      "  -d, --debug           Increment the debug level (repeatable)\n"
@@ -742,6 +745,7 @@ static void genksyms_usage(void)
 	      "  -q, --quiet           Disable warnings (default)\n"
 	      "  -h, --help            Print this message\n"
 	      "  -V, --version         Print the release version\n"
+	      "  -R, --relative-crc    Emit section relative symbol CRCs\n"
 #else /* __GNU_LIBRARY__ */
 	      "  -s                    Select symbol prefix\n"
 	      "  -d                    Increment the debug level (repeatable)\n"
@@ -753,6 +757,7 @@ static void genksyms_usage(void)
 	      "  -q                    Disable warnings (default)\n"
 	      "  -h                    Print this message\n"
 	      "  -V                    Print the release version\n"
+	      "  -R                    Emit section relative symbol CRCs\n"
 #endif /* __GNU_LIBRARY__ */
 	      , stderr);
 }
@@ -774,13 +779,14 @@ int main(int argc, char **argv)
 		{"preserve", 0, 0, 'p'},
 		{"version", 0, 0, 'V'},
 		{"help", 0, 0, 'h'},
+		{"relative-crc", 0, 0, 'R'},
 		{0, 0, 0, 0}
 	};
 
-	while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph",
+	while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR",
 				&long_opts[0], NULL)) != EOF)
 #else /* __GNU_LIBRARY__ */
-	while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF)
+	while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF)
 #endif /* __GNU_LIBRARY__ */
 		switch (o) {
 		case 's':
@@ -823,6 +829,9 @@ int main(int argc, char **argv)
 		case 'h':
 			genksyms_usage();
 			return 0;
+		case 'R':
+			flag_rel_crcs = 1;
+			break;
 		default:
 			genksyms_usage();
 			return 1;
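
The effect of the new -R flag is easiest to see in the linker-script text that export_symbol() emits. Without it, __crc_<symbol> is an absolute assignment, which the linker records as an SHN_ABS symbol; with it, the CRC is stored as a 32-bit literal in .rodata and __crc_<symbol> labels its location, making the symbol section-relative. For a hypothetical export foo with CRC 0x12345678, the two forms would be, per the printf formats in the hunk above:

__crc_foo = 0x12345678;

SECTIONS { .rodata : ALIGN(4) { __crc_foo = .; LONG(0x12345678); } }

The first line is the default (absolute) output; the second is what -R produces. This is also why the companion modpost change below has to start dereferencing the symbol instead of reading the CRC straight out of st_value.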
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 299b92ca1ae0..5d554419170b 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -219,6 +219,10 @@ static int symbol_valid(struct sym_entry *s)
 		"_SDA2_BASE_",		/* ppc */
 		NULL };
 
+	static char *special_prefixes[] = {
+		"__crc_",		/* modversions */
+		NULL };
+
 	static char *special_suffixes[] = {
 		"_veneer",		/* arm */
 		"_from_arm",		/* arm */
@@ -259,6 +263,14 @@ static int symbol_valid(struct sym_entry *s)
 		if (strcmp(sym_name, special_symbols[i]) == 0)
 			return 0;
 
+	for (i = 0; special_prefixes[i]; i++) {
+		int l = strlen(special_prefixes[i]);
+
+		if (l <= strlen(sym_name) &&
+		    strncmp(sym_name, special_prefixes[i], l) == 0)
+			return 0;
+	}
+
 	for (i = 0; special_suffixes[i]; i++) {
 		int l = strlen(sym_name) - strlen(special_suffixes[i]);
 
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 29c89a6bad3d..4dedd0d3d3a7 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -621,6 +621,16 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
 	if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
 		is_crc = true;
 		crc = (unsigned int) sym->st_value;
+		if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
+			unsigned int *crcp;
+
+			/* symbol points to the CRC in the ELF object */
+			crcp = (void *)info->hdr + sym->st_value +
+			       info->sechdrs[sym->st_shndx].sh_offset -
+			       (info->hdr->e_type != ET_REL ?
+				info->sechdrs[sym->st_shndx].sh_addr : 0);
+			crc = *crcp;
+		}
 		sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
 			       export);
 	}
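
With relative CRCs, a __crc_ symbol's st_value is no longer the checksum itself but the address of the 32-bit word holding it, so modpost must translate that address into an offset within its mapped copy of the ELF object. For ET_REL objects st_value is already section-relative; for a linked image (e.g. vmlinux) it has the section's load address folded in, which the added code subtracts again before adding the section's file offset. A worked example of the arithmetic with made-up numbers (a standalone userspace sketch, not modpost code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical values for a linked image (e_type != ET_REL) */
	uint64_t st_value  = 0xffffffff81c00124ULL; /* __crc_foo points here */
	uint64_t sh_addr   = 0xffffffff81c00000ULL; /* .rodata load address  */
	uint64_t sh_offset = 0x00e00000ULL;         /* .rodata file offset   */

	/* same expression as the patch: st_value + sh_offset - sh_addr */
	printf("CRC word lives at file offset 0x%llx\n",
	       (unsigned long long)(st_value + sh_offset - sh_addr));
	return 0;
}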
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 5e0dea2cdc01..039636ffb6c8 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		*type = INSN_RETURN;
 		break;
 
-	case 0xc5: /* iret */
 	case 0xca: /* retf */
 	case 0xcb: /* retf */
+	case 0xcf: /* iret */
 		*type = INSN_CONTEXT_SWITCH;
 		break;
 
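
The objtool change is a straight opcode correction: 0xc5 does not encode iret — in 32-bit code it is LDS, and in 64-bit code it serves as the two-byte VEX prefix — while the actual iret encoding (covering iret/iretd/iretq) is 0xcf. A small reference table for the one-byte opcodes this hunk touches, as a standalone sketch rather than objtool code:

#include <stdio.h>

/* Opcodes from the hunk above, per the Intel SDM. 0xc5 (removed) is
 * LDS in 32-bit code and the VEX prefix in 64-bit code; iret is 0xcf.
 */
static const struct { unsigned char op; const char *mnemonic; } ops[] = {
	{ 0xca, "retf imm16" },
	{ 0xcb, "retf" },
	{ 0xcf, "iret/iretd/iretq" },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
		printf("0x%02x  %s\n", ops[i].op, ops[i].mnemonic);
	return 0;
}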
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index b13fed534d76..9f7bd1915c21 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -67,21 +67,23 @@ static int map_equal(int lru_map, int expected)
 	return map_subset(lru_map, expected) && map_subset(expected, lru_map);
 }
 
-static int sched_next_online(int pid, int next_to_try)
+static int sched_next_online(int pid, int *next_to_try)
 {
 	cpu_set_t cpuset;
+	int next = *next_to_try;
+	int ret = -1;
 
-	if (next_to_try == nr_cpus)
-		return -1;
-
-	while (next_to_try < nr_cpus) {
+	while (next < nr_cpus) {
 		CPU_ZERO(&cpuset);
-		CPU_SET(next_to_try++, &cpuset);
-		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
+		CPU_SET(next++, &cpuset);
+		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
+			ret = 0;
 			break;
+		}
 	}
 
-	return next_to_try;
+	*next_to_try = next;
+	return ret;
 }
 
 /* Size of the LRU amp is 2
@@ -96,11 +98,12 @@ static void test_lru_sanity0(int map_type, int map_flags)
 {
 	unsigned long long key, value[nr_cpus];
 	int lru_map_fd, expected_map_fd;
+	int next_cpu = 0;
 
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
@@ -183,6 +186,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
 	int lru_map_fd, expected_map_fd;
 	unsigned int batch_size;
 	unsigned int map_size;
+	int next_cpu = 0;
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		/* Ther percpu lru list (i.e each cpu has its own LRU
@@ -196,7 +200,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	batch_size = tgt_free / 2;
 	assert(batch_size * 2 == tgt_free);
@@ -262,6 +266,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
 	int lru_map_fd, expected_map_fd;
 	unsigned int batch_size;
 	unsigned int map_size;
+	int next_cpu = 0;
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		/* Ther percpu lru list (i.e each cpu has its own LRU
@@ -275,7 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	batch_size = tgt_free / 2;
 	assert(batch_size * 2 == tgt_free);
@@ -370,11 +375,12 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
 	int lru_map_fd, expected_map_fd;
 	unsigned int batch_size;
 	unsigned int map_size;
+	int next_cpu = 0;
 
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	batch_size = tgt_free / 2;
 	assert(batch_size * 2 == tgt_free);
@@ -430,11 +436,12 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
 	int lru_map_fd, expected_map_fd;
 	unsigned long long key, value[nr_cpus];
 	unsigned long long end_key;
+	int next_cpu = 0;
 
 	printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
 	       map_flags);
 
-	assert(sched_next_online(0, 0) != -1);
+	assert(sched_next_online(0, &next_cpu) != -1);
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		lru_map_fd = create_map(map_type, map_flags,
@@ -502,9 +509,8 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
 static void test_lru_sanity5(int map_type, int map_flags)
 {
 	unsigned long long key, value[nr_cpus];
-	int next_sched_cpu = 0;
+	int next_cpu = 0;
 	int map_fd;
-	int i;
 
 	if (map_flags & BPF_F_NO_COMMON_LRU)
 		return;
@@ -519,27 +525,20 @@ static void test_lru_sanity5(int map_type, int map_flags)
 	key = 0;
 	assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
 
-	for (i = 0; i < nr_cpus; i++) {
+	while (sched_next_online(0, &next_cpu) != -1) {
 		pid_t pid;
 
 		pid = fork();
 		if (pid == 0) {
-			next_sched_cpu = sched_next_online(0, next_sched_cpu);
-			if (next_sched_cpu != -1)
-				do_test_lru_sanity5(key, map_fd);
+			do_test_lru_sanity5(key, map_fd);
 			exit(0);
 		} else if (pid == -1) {
-			printf("couldn't spawn #%d process\n", i);
+			printf("couldn't spawn process to test key:%llu\n",
+			       key);
 			exit(1);
 		} else {
 			int status;
 
-			/* It is mostly redundant and just allow the parent
-			 * process to update next_shced_cpu for the next child
-			 * process
-			 */
-			next_sched_cpu = sched_next_online(pid, next_sched_cpu);
-
 			assert(waitpid(pid, &status, 0) == pid);
 			assert(status == 0);
 			key++;
@@ -547,6 +546,8 @@ static void test_lru_sanity5(int map_type, int map_flags)
 	}
 
 	close(map_fd);
+	/* At least one key should be tested */
+	assert(key > 0);
 
 	printf("Pass\n");
 }
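
The refactored helper separates its two results: the return value now reports whether the process was pinned to some online CPU, while the scan cursor advances through the by-reference parameter. That lets test_lru_sanity5() fork one child per online CPU simply by looping until the helper fails, instead of iterating a CPU counter and re-checking inside each child. A standalone sketch of the same contract outside the test harness (nr_cpus derived via sysconf here; the map logic from the test is omitted):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* Same contract as the refactored helper above: try to pin pid to the
 * next online CPU at or after *next_to_try; return 0 on success, -1
 * when no CPU is left, and advance the cursor either way.
 */
static int sched_next_online(int pid, int *next_to_try)
{
	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	cpu_set_t cpuset;
	int next = *next_to_try;
	int ret = -1;

	while (next < nr_cpus) {
		CPU_ZERO(&cpuset);
		CPU_SET(next++, &cpuset);
		if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
			ret = 0;
			break;
		}
	}

	*next_to_try = next;
	return ret;
}

int main(void)
{
	int next_cpu = 0;

	/* visit every online CPU once, as test_lru_sanity5() now does */
	while (sched_next_online(0, &next_cpu) != -1)
		printf("pinned to CPU %d\n", next_cpu - 1);
	return 0;
}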